dp_main.c

/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
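/*
 * Flow-control v2 is compiled out in this configuration: provide a no-op
 * stand-in so callers of cdp_dump_flow_pool_info() in this file still
 * compile without the feature.
 */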
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
    return;
}
#endif

#include "dp_ipa.h"
#include "dp_cal_client_api.h"

#ifdef CONFIG_MCL
extern int con_mode_monitor;
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif

void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
              struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
                                  uint8_t *peer_mac_addr,
                                  struct cdp_ctrl_objmgr_peer *ctrl_peer);
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);

#define DP_INTR_POLL_TIMER_MS 10

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS 1000
/* WDS AST entry aging timer value */
#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS 120000
#define DP_WDS_AST_AGING_TIMER_CNT \
    ((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
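/*
 * With the defaults above this evaluates to (120000 / 1000) - 1 = 119; the
 * intent (an inference from the macro names, not spelled out here) is that
 * WDS AST aging work runs only once per ~120 expirations of the 1000 ms
 * generic aging timer rather than on every tick.
 */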
#define DP_MCS_LENGTH (6*MAX_MCS)
#define DP_NSS_LENGTH (6*SS_COUNT)
#define DP_MU_GROUP_SHOW 16
#define DP_MU_GROUP_LENGTH (6 * DP_MU_GROUP_SHOW)
#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
#define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
#define DP_MAX_MCS_STRING_LEN 30
#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100

#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL 0xb
#define RX_RING_MASK_VAL 0x7
#else
#define TX_RING_MASK_VAL 0xF
#define RX_RING_MASK_VAL 0xF
#endif

#define STR_MAXLEN 64

#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR 0x2000
/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
                                   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
                                      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

#define RNG_ERR "SRNG setup failed for"

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP     TID
 * 000000    0
 * 001000    1
 * 010000    2
 * 011000    3
 * 100000    4
 * 101000    5
 * 110000    6
 * 111000    7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
    0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7,
};
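/*
 * Illustrative lookup only, not driver code: the table is indexed by the
 * 6-bit DSCP value taken from the IP TOS / Traffic Class byte, e.g.
 *
 *     uint8_t dscp = (tos >> 2) & 0x3f;
 *     uint8_t tid  = default_dscp_tid_map[dscp];
 *
 * so DSCP 0-7 map to TID 0, DSCP 8-15 to TID 1, and so on.
 */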
/*
 * struct dp_rate_debug
 *
 * @mcs_type: print string for a given mcs
 * @valid: valid mcs rate?
 */
struct dp_rate_debug {
    char mcs_type[DP_MAX_MCS_STRING_LEN];
    uint8_t valid;
};

#define MCS_VALID 1
#define MCS_INVALID 0

static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
    {
        {"OFDM 48 Mbps", MCS_VALID},
        {"OFDM 24 Mbps", MCS_VALID},
        {"OFDM 12 Mbps", MCS_VALID},
        {"OFDM 6 Mbps ", MCS_VALID},
        {"OFDM 54 Mbps", MCS_VALID},
        {"OFDM 36 Mbps", MCS_VALID},
        {"OFDM 18 Mbps", MCS_VALID},
        {"OFDM 9 Mbps ", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"CCK 11 Mbps Long ", MCS_VALID},
        {"CCK 5.5 Mbps Long ", MCS_VALID},
        {"CCK 2 Mbps Long ", MCS_VALID},
        {"CCK 1 Mbps Long ", MCS_VALID},
        {"CCK 11 Mbps Short ", MCS_VALID},
        {"CCK 5.5 Mbps Short", MCS_VALID},
        {"CCK 2 Mbps Short ", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"HT MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"HT MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"HT MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
        {"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
        {"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
        {"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
        {"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"VHT MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"VHT MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"VHT MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"VHT MCS 3 (16-QAM 1/2) ", MCS_VALID},
        {"VHT MCS 4 (16-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 5 (64-QAM 2/3) ", MCS_VALID},
        {"VHT MCS 6 (64-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 7 (64-QAM 5/6) ", MCS_VALID},
        {"VHT MCS 8 (256-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 9 (256-QAM 5/6) ", MCS_VALID},
        {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
        {"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"HE MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"HE MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"HE MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID},
        {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID},
        {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID},
        {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID},
        {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID},
        {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID},
        {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID},
        {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
        {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_VALID},
    }
};
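/*
 * Illustrative lookup only, not driver code: rows are indexed by preamble
 * type and columns by MCS, e.g. dp_rate_string[pkt_type][mcs].mcs_type is
 * the print string and .valid says whether that MCS index is meaningful for
 * that preamble. (pkt_type here is a placeholder for a DOT11_* value defined
 * outside this file.)
 */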
/**
 * dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only first radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
    DP_NSS_DEFAULT_MAP,
    DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
    DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
    DP_NSS_DBDC_OFFLOADED_MAP,
    DP_NSS_DBTC_OFFLOADED_MAP,
    DP_NSS_CPU_RING_MAP_MAX
};

/**
 * @brief Cpu to tx ring map
 */
#ifdef CONFIG_WIN
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
    {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
    {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
    {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
    {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
    {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#else
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
    {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
    {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
    {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
    {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
    {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#endif
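/*
 * Illustrative use only, not driver code: the table is read as
 * dp_cpu_ring_map[map_id][intr_ctx], where map_id is one of the
 * dp_cpu_ring_map_types values chosen from the NSS offload configuration;
 * per the @brief above, the stored value selects the TX ring that interrupt
 * context should use, which is how the per-radio offload rows differ.
 */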
/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
    STATS_FW = 0,
    STATS_HOST = 1,
    STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 *
 */
enum dp_fw_stats {
    TXRX_FW_STATS_INVALID = -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
    {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
    /* Last ENUM for HTT FW STATS */
    {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
    {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
};
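/*
 * Illustrative use only, not driver code: a row is selected by the stats id
 * requested by the caller and a column by enum dp_stats_type, e.g.
 *
 *     int fw_stat = dp_stats_mapping_table[stats_id][STATS_FW];
 *
 * A cell holding TXRX_FW_STATS_INVALID (or TXRX_HOST_STATS_INVALID) means
 * that id has no firmware (or host) counterpart.
 */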
/* MCL specific functions */
#ifdef CONFIG_MCL
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0, since in interrupt mode (softirq based RX)
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes, regular interrupt
 * processing will not process monitor mode rings; that is done in a separate
 * timer context instead.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
    return 0;
}

/*
 * dp_service_mon_rings() - timer handler to reap monitor rings,
 * required because ppdu end interrupts are not available
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_service_mon_rings(void *arg)
{
    struct dp_soc *soc = (struct dp_soc *)arg;
    int ring = 0, work_done, mac_id;
    struct dp_pdev *pdev = NULL;

    for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
        pdev = soc->pdev_list[ring];
        if (!pdev)
            continue;
        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
            int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
                                                      pdev->pdev_id);

            work_done = dp_mon_process(soc, mac_for_pdev,
                                       QCA_NAPI_BUDGET);

            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                      FL("Reaped %d descs from Monitor rings"),
                      work_done);
        }
    }

    qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
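/*
 * Note: the handler re-arms itself via qdf_timer_mod() above, so once the
 * timer is started it fires every DP_INTR_POLL_TIMER_MS (10 ms) until it is
 * stopped; starting and stopping are handled elsewhere, presumably where
 * monitor mode / packet log reaping is enabled and disabled.
 */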
#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @ppdev: physical device handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
    struct dp_pdev *handle = (struct dp_pdev *)ppdev;

    if (handle->pkt_log_init) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: Packet log already initialized", __func__);
        return;
    }

    pktlog_sethandle(&handle->pl_dev, scn);
    pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);

    if (pktlogmod_init(scn)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: pktlogmod_init failed", __func__);
        handle->pkt_log_init = false;
    } else {
        handle->pkt_log_init = true;
    }
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
    struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

    dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
    pktlog_htc_attach();
}

/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
    int i;
    int num_rx_contexts = 0;
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;

    for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
        if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
            num_rx_contexts++;

    return num_rx_contexts;
}

/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
    void *scn = (void *)handle->soc->hif_handle;

    if (!scn) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: Invalid hif(scn) handle", __func__);
        return;
    }

    pktlogmod_exit(scn);
    handle->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
    return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#endif

/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * Return: pointer to dp_vdev
 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
    return (struct dp_vdev *)cdp_opaque_vdev;
}
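/**
 * dp_peer_add_ast_wifi3() - cdp wrapper to add an AST entry for a peer
 * @soc_hdl: opaque cdp soc handle (cast to struct dp_soc internally)
 * @peer_hdl: opaque cdp peer handle (cast to struct dp_peer internally)
 * @mac_addr: MAC address of the AST entry to add
 * @type: AST entry type
 * @flags: AST entry flags
 *
 * Thin wrapper around dp_peer_add_ast().
 *
 * Return: value returned by dp_peer_add_ast()
 */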
static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                 struct cdp_peer *peer_hdl,
                                 uint8_t *mac_addr,
                                 enum cdp_txrx_ast_entry_type type,
                                 uint32_t flags)
{
    return dp_peer_add_ast((struct dp_soc *)soc_hdl,
                           (struct dp_peer *)peer_hdl,
                           mac_addr,
                           type,
                           flags);
}
  488. static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
  489. struct cdp_peer *peer_hdl,
  490. uint8_t *wds_macaddr,
  491. uint32_t flags)
  492. {
  493. int status = -1;
  494. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  495. struct dp_ast_entry *ast_entry = NULL;
  496. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  497. qdf_spin_lock_bh(&soc->ast_lock);
  498. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
  499. peer->vdev->pdev->pdev_id);
  500. if (ast_entry) {
  501. status = dp_peer_update_ast(soc,
  502. peer,
  503. ast_entry, flags);
  504. }
  505. qdf_spin_unlock_bh(&soc->ast_lock);
  506. return status;
  507. }
  508. /*
  509. * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
* @soc_hdl: Datapath SOC handle
* @wds_macaddr: WDS entry MAC Address
* @vdev_handle: Datapath VDEV handle
*
* Return: None
  513. */
  514. static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
  515. uint8_t *wds_macaddr, void *vdev_handle)
  516. {
  517. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  518. struct dp_ast_entry *ast_entry = NULL;
  519. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  520. qdf_spin_lock_bh(&soc->ast_lock);
  521. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
  522. vdev->pdev->pdev_id);
  523. if (ast_entry) {
  524. if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
  525. (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
  526. (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
  527. ast_entry->is_active = TRUE;
  528. }
  529. }
  530. qdf_spin_unlock_bh(&soc->ast_lock);
  531. }
  532. /*
* dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
* @soc_hdl: Datapath SOC handle
* @vdev_hdl: Datapath VDEV handle
*
  536. * Return: None
  537. */
  538. static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
  539. void *vdev_hdl)
  540. {
  541. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  542. struct dp_pdev *pdev;
  543. struct dp_vdev *vdev;
  544. struct dp_peer *peer;
  545. struct dp_ast_entry *ase, *temp_ase;
  546. int i;
  547. qdf_spin_lock_bh(&soc->ast_lock);
  548. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  549. pdev = soc->pdev_list[i];
  550. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  551. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  552. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  553. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  554. if ((ase->type ==
  555. CDP_TXRX_AST_TYPE_STATIC) ||
  556. (ase->type ==
  557. CDP_TXRX_AST_TYPE_SELF) ||
  558. (ase->type ==
  559. CDP_TXRX_AST_TYPE_STA_BSS))
  560. continue;
  561. ase->is_active = TRUE;
  562. }
  563. }
  564. }
  565. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  566. }
  567. qdf_spin_unlock_bh(&soc->ast_lock);
  568. }
  569. /*
* dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
* @soc_hdl: Datapath SOC handle
  572. *
  573. * Return: None
  574. */
  575. static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
  576. {
  577. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  578. struct dp_pdev *pdev;
  579. struct dp_vdev *vdev;
  580. struct dp_peer *peer;
  581. struct dp_ast_entry *ase, *temp_ase;
  582. int i;
  583. qdf_spin_lock_bh(&soc->ast_lock);
  584. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  585. pdev = soc->pdev_list[i];
  586. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  587. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  588. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  589. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  590. if ((ase->type ==
  591. CDP_TXRX_AST_TYPE_STATIC) ||
  592. (ase->type ==
  593. CDP_TXRX_AST_TYPE_SELF) ||
  594. (ase->type ==
  595. CDP_TXRX_AST_TYPE_STA_BSS))
  596. continue;
  597. dp_peer_del_ast(soc, ase);
  598. }
  599. }
  600. }
  601. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  602. }
  603. qdf_spin_unlock_bh(&soc->ast_lock);
  604. }
  605. /**
  606. * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
  607. * and return ast entry information
  608. * of first ast entry found in the
  609. * table with given mac address
  610. *
* @soc_hdl : data path soc handle
  612. * @ast_mac_addr : AST entry mac address
  613. * @ast_entry_info : ast entry information
  614. *
  615. * return : true if ast entry found with ast_mac_addr
  616. * false if ast entry not found
  617. */
  618. static bool dp_peer_get_ast_info_by_soc_wifi3
  619. (struct cdp_soc_t *soc_hdl,
  620. uint8_t *ast_mac_addr,
  621. struct cdp_ast_entry_info *ast_entry_info)
  622. {
  623. struct dp_ast_entry *ast_entry;
  624. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  625. qdf_spin_lock_bh(&soc->ast_lock);
  626. ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
  627. if (ast_entry && !ast_entry->delete_in_progress) {
  628. ast_entry_info->type = ast_entry->type;
  629. ast_entry_info->pdev_id = ast_entry->pdev_id;
  630. ast_entry_info->vdev_id = ast_entry->vdev_id;
  631. ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
  632. qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
  633. &ast_entry->peer->mac_addr.raw[0],
  634. DP_MAC_ADDR_LEN);
  635. qdf_spin_unlock_bh(&soc->ast_lock);
  636. return true;
  637. }
  638. qdf_spin_unlock_bh(&soc->ast_lock);
  639. return false;
  640. }
  641. /**
  642. * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
  643. * and return ast entry information
  644. * if mac address and pdev_id matches
  645. *
* @soc_hdl : data path soc handle
  647. * @ast_mac_addr : AST entry mac address
  648. * @pdev_id : pdev_id
  649. * @ast_entry_info : ast entry information
  650. *
  651. * return : true if ast entry found with ast_mac_addr
  652. * false if ast entry not found
  653. */
  654. static bool dp_peer_get_ast_info_by_pdevid_wifi3
  655. (struct cdp_soc_t *soc_hdl,
  656. uint8_t *ast_mac_addr,
  657. uint8_t pdev_id,
  658. struct cdp_ast_entry_info *ast_entry_info)
  659. {
  660. struct dp_ast_entry *ast_entry;
  661. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  662. qdf_spin_lock_bh(&soc->ast_lock);
  663. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
  664. if (ast_entry && !ast_entry->delete_in_progress) {
  665. ast_entry_info->type = ast_entry->type;
  666. ast_entry_info->pdev_id = ast_entry->pdev_id;
  667. ast_entry_info->vdev_id = ast_entry->vdev_id;
  668. ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
  669. qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
  670. &ast_entry->peer->mac_addr.raw[0],
  671. DP_MAC_ADDR_LEN);
  672. qdf_spin_unlock_bh(&soc->ast_lock);
  673. return true;
  674. }
  675. qdf_spin_unlock_bh(&soc->ast_lock);
  676. return false;
  677. }
  678. /**
  679. * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
  680. * with given mac address
  681. *
* @soc_handle : data path soc handle
* @mac_addr : AST entry mac address
* @callback : callback function to be called on ast delete response from FW
* @cookie : argument to be passed to callback
*
* return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
* is sent
* QDF_STATUS_E_INVAL if ast entry not found
  690. */
  691. static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
  692. uint8_t *mac_addr,
  693. txrx_ast_free_cb callback,
  694. void *cookie)
  695. {
  696. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  697. struct dp_ast_entry *ast_entry;
  698. txrx_ast_free_cb cb = NULL;
  699. void *arg = NULL;
  700. qdf_spin_lock_bh(&soc->ast_lock);
  701. ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
  702. if (!ast_entry) {
  703. qdf_spin_unlock_bh(&soc->ast_lock);
  704. return -QDF_STATUS_E_INVAL;
  705. }
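/* Save any free callback registered by a previous caller; it is invoked
 * below, outside the AST lock, with CDP_TXRX_AST_DELETE_IN_PROGRESS so that
 * caller can release its cookie before the new callback/cookie take effect.
 */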
  706. if (ast_entry->callback) {
  707. cb = ast_entry->callback;
  708. arg = ast_entry->cookie;
  709. }
  710. ast_entry->callback = callback;
  711. ast_entry->cookie = cookie;
  712. /*
* If delete_in_progress is set, an AST delete has already been sent to the
* target and the host is waiting for the response; do not send another
* delete request.
  716. */
  717. if (!ast_entry->delete_in_progress)
  718. dp_peer_del_ast(soc, ast_entry);
  719. qdf_spin_unlock_bh(&soc->ast_lock);
  720. if (cb) {
  721. cb(soc->ctrl_psoc,
  722. soc,
  723. arg,
  724. CDP_TXRX_AST_DELETE_IN_PROGRESS);
  725. }
  726. return QDF_STATUS_SUCCESS;
  727. }
  728. /**
  729. * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
  730. * table if mac address and pdev_id matches
  731. *
* @soc_handle : data path soc handle
* @mac_addr : AST entry mac address
* @pdev_id : pdev id
* @callback : callback function to be called on ast delete response from FW
* @cookie : argument to be passed to callback
*
* return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
* is sent
* QDF_STATUS_E_INVAL if ast entry not found
  741. */
  742. static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
  743. uint8_t *mac_addr,
  744. uint8_t pdev_id,
  745. txrx_ast_free_cb callback,
  746. void *cookie)
  747. {
  748. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  749. struct dp_ast_entry *ast_entry;
  750. txrx_ast_free_cb cb = NULL;
  751. void *arg = NULL;
  752. qdf_spin_lock_bh(&soc->ast_lock);
  753. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
  754. if (!ast_entry) {
  755. qdf_spin_unlock_bh(&soc->ast_lock);
  756. return -QDF_STATUS_E_INVAL;
  757. }
  758. if (ast_entry->callback) {
  759. cb = ast_entry->callback;
  760. arg = ast_entry->cookie;
  761. }
  762. ast_entry->callback = callback;
  763. ast_entry->cookie = cookie;
  764. /*
* If delete_in_progress is set, an AST delete has already been sent to the
* target and the host is waiting for the response; do not send another
* delete request.
  768. */
  769. if (!ast_entry->delete_in_progress)
  770. dp_peer_del_ast(soc, ast_entry);
  771. qdf_spin_unlock_bh(&soc->ast_lock);
  772. if (cb) {
  773. cb(soc->ctrl_psoc,
  774. soc,
  775. arg,
  776. CDP_TXRX_AST_DELETE_IN_PROGRESS);
  777. }
  778. return QDF_STATUS_SUCCESS;
  779. }
  780. /**
* dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
  782. * @ring_num: ring num of the ring being queried
  783. * @grp_mask: the grp_mask array for the ring type in question.
  784. *
  785. * The grp_mask array is indexed by group number and the bit fields correspond
  786. * to ring numbers. We are finding which interrupt group a ring belongs to.
  787. *
  788. * Return: the index in the grp_mask array with the ring number.
  789. * -QDF_STATUS_E_NOENT if no entry is found
  790. */
  791. static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
  792. {
  793. int ext_group_num;
  794. int mask = 1 << ring_num;
  795. for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
  796. ext_group_num++) {
  797. if (mask & grp_mask[ext_group_num])
  798. return ext_group_num;
  799. }
  800. return -QDF_STATUS_E_NOENT;
  801. }
  802. static int dp_srng_calculate_msi_group(struct dp_soc *soc,
  803. enum hal_ring_type ring_type,
  804. int ring_num)
  805. {
  806. int *grp_mask;
  807. switch (ring_type) {
  808. case WBM2SW_RELEASE:
  809. /* dp_tx_comp_handler - soc->tx_comp_ring */
  810. if (ring_num < 3)
  811. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  812. /* dp_rx_wbm_err_process - soc->rx_rel_ring */
  813. else if (ring_num == 3) {
  814. /* sw treats this as a separate ring type */
  815. grp_mask = &soc->wlan_cfg_ctx->
  816. int_rx_wbm_rel_ring_mask[0];
  817. ring_num = 0;
  818. } else {
  819. qdf_assert(0);
  820. return -QDF_STATUS_E_NOENT;
  821. }
  822. break;
  823. case REO_EXCEPTION:
  824. /* dp_rx_err_process - &soc->reo_exception_ring */
  825. grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
  826. break;
  827. case REO_DST:
  828. /* dp_rx_process - soc->reo_dest_ring */
  829. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  830. break;
  831. case REO_STATUS:
  832. /* dp_reo_status_ring_handler - soc->reo_status_ring */
  833. grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
  834. break;
  835. /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
  836. case RXDMA_MONITOR_STATUS:
  837. /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
  838. case RXDMA_MONITOR_DST:
  839. /* dp_mon_process */
  840. grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
  841. break;
  842. case RXDMA_DST:
  843. /* dp_rxdma_err_process */
  844. grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
  845. break;
  846. case RXDMA_BUF:
  847. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  848. break;
  849. case RXDMA_MONITOR_BUF:
  850. /* TODO: support low_thresh interrupt */
  851. return -QDF_STATUS_E_NOENT;
  852. break;
  853. case TCL_DATA:
  854. case TCL_CMD:
  855. case REO_CMD:
  856. case SW2WBM_RELEASE:
  857. case WBM_IDLE_LINK:
  858. /* normally empty SW_TO_HW rings */
  859. return -QDF_STATUS_E_NOENT;
  860. break;
  861. case TCL_STATUS:
  862. case REO_REINJECT:
  863. /* misc unused rings */
  864. return -QDF_STATUS_E_NOENT;
  865. break;
  866. case CE_SRC:
  867. case CE_DST:
  868. case CE_DST_STATUS:
  869. /* CE_rings - currently handled by hif */
  870. default:
  871. return -QDF_STATUS_E_NOENT;
  872. break;
  873. }
  874. return dp_srng_find_ring_in_mask(ring_num, grp_mask);
  875. }
  876. static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
  877. *ring_params, int ring_type, int ring_num)
  878. {
  879. int msi_group_number;
  880. int msi_data_count;
  881. int ret;
  882. uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
  883. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  884. &msi_data_count, &msi_data_start,
  885. &msi_irq_start);
  886. if (ret)
  887. return;
  888. msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
  889. ring_num);
  890. if (msi_group_number < 0) {
  891. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  892. FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
  893. ring_type, ring_num);
  894. ring_params->msi_addr = 0;
  895. ring_params->msi_data = 0;
  896. return;
  897. }
  898. if (msi_group_number > msi_data_count) {
  899. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  900. FL("2 msi_groups will share an msi; msi_group_num %d"),
  901. msi_group_number);
  902. QDF_ASSERT(0);
  903. }
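/* Build the 64-bit MSI address from the low/high words reported by PLD and
 * derive the MSI data value for this ring's group, wrapping around when
 * there are more interrupt groups than MSI vectors.
 */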
  904. pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
  905. ring_params->msi_addr = addr_low;
  906. ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
  907. ring_params->msi_data = (msi_group_number % msi_data_count)
  908. + msi_data_start;
  909. ring_params->flags |= HAL_SRNG_MSI_INTR;
  910. }
  911. /**
  912. * dp_print_ast_stats() - Dump AST table contents
  913. * @soc: Datapath soc handle
  914. *
  915. * return void
  916. */
  917. #ifdef FEATURE_AST
  918. void dp_print_ast_stats(struct dp_soc *soc)
  919. {
  920. uint8_t i;
  921. uint8_t num_entries = 0;
  922. struct dp_vdev *vdev;
  923. struct dp_pdev *pdev;
  924. struct dp_peer *peer;
  925. struct dp_ast_entry *ase, *tmp_ase;
  926. char type[CDP_TXRX_AST_TYPE_MAX][10] = {
  927. "NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
  928. "DA", "HMWDS_SEC"};
  929. DP_PRINT_STATS("AST Stats:");
  930. DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
  931. DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
  932. DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
  933. DP_PRINT_STATS("AST Table:");
  934. qdf_spin_lock_bh(&soc->ast_lock);
  935. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  936. pdev = soc->pdev_list[i];
  937. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  938. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  939. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  940. DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
  941. DP_PRINT_STATS("%6d mac_addr = %pM"
  942. " peer_mac_addr = %pM"
  943. " peer_id = %u"
  944. " type = %s"
  945. " next_hop = %d"
  946. " is_active = %d"
  947. " is_bss = %d"
  948. " ast_idx = %d"
  949. " ast_hash = %d"
  950. " delete_in_progress = %d"
  951. " pdev_id = %d"
  952. " vdev_id = %d",
  953. ++num_entries,
  954. ase->mac_addr.raw,
  955. ase->peer->mac_addr.raw,
  956. ase->peer->peer_ids[0],
  957. type[ase->type],
  958. ase->next_hop,
  959. ase->is_active,
  960. ase->is_bss,
  961. ase->ast_idx,
  962. ase->ast_hash_value,
  963. ase->delete_in_progress,
  964. ase->pdev_id,
  965. ase->vdev_id);
  966. }
  967. }
  968. }
  969. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  970. }
  971. qdf_spin_unlock_bh(&soc->ast_lock);
  972. }
  973. #else
  974. void dp_print_ast_stats(struct dp_soc *soc)
  975. {
  976. DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
  977. return;
  978. }
  979. #endif
  980. /**
  981. * dp_print_peer_table() - Dump all Peer stats
  982. * @vdev: Datapath Vdev handle
  983. *
  984. * return void
  985. */
  986. static void dp_print_peer_table(struct dp_vdev *vdev)
  987. {
  988. struct dp_peer *peer = NULL;
  989. DP_PRINT_STATS("Dumping Peer Table Stats:");
  990. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  991. if (!peer) {
  992. DP_PRINT_STATS("Invalid Peer");
  993. return;
  994. }
  995. DP_PRINT_STATS(" peer_mac_addr = %pM"
  996. " nawds_enabled = %d"
  997. " bss_peer = %d"
  998. " wapi = %d"
  999. " wds_enabled = %d"
  1000. " delete in progress = %d"
  1001. " peer id = %d",
  1002. peer->mac_addr.raw,
  1003. peer->nawds_enabled,
  1004. peer->bss_peer,
  1005. peer->wapi,
  1006. peer->wds_enabled,
  1007. peer->delete_in_progress,
  1008. peer->peer_ids[0]);
  1009. }
  1010. }
  1011. /*
* dp_srng_setup() - Internal function to setup SRNG rings used by data path
  1013. */
  1014. static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
  1015. int ring_type, int ring_num, int mac_id, uint32_t num_entries)
  1016. {
  1017. void *hal_soc = soc->hal_soc;
  1018. uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
  1019. /* TODO: See if we should get align size from hal */
  1020. uint32_t ring_base_align = 8;
  1021. struct hal_srng_params ring_params;
  1022. uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
  1023. /* TODO: Currently hal layer takes care of endianness related settings.
  1024. * See if these settings need to passed from DP layer
  1025. */
  1026. ring_params.flags = 0;
  1027. num_entries = (num_entries > max_entries) ? max_entries : num_entries;
  1028. srng->hal_srng = NULL;
  1029. srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
  1030. srng->num_entries = num_entries;
  1031. if (!soc->dp_soc_reinit) {
  1032. srng->base_vaddr_unaligned =
  1033. qdf_mem_alloc_consistent(soc->osdev,
  1034. soc->osdev->dev,
  1035. srng->alloc_size,
  1036. &srng->base_paddr_unaligned);
  1037. }
  1038. if (!srng->base_vaddr_unaligned) {
  1039. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1040. FL("alloc failed - ring_type: %d, ring_num %d"),
  1041. ring_type, ring_num);
  1042. return QDF_STATUS_E_NOMEM;
  1043. }
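/* Offset the ring base address within the over-allocated buffer (alloc_size
 * reserves ring_base_align - 1 extra bytes for this) and apply the same
 * offset to the physical address so vaddr and paddr stay in sync.
 */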
  1044. ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
  1045. ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
  1046. ring_params.ring_base_paddr = srng->base_paddr_unaligned +
  1047. ((unsigned long)(ring_params.ring_base_vaddr) -
  1048. (unsigned long)srng->base_vaddr_unaligned);
  1049. ring_params.num_entries = num_entries;
  1050. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  1051. FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
  1052. ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
  1053. (void *)ring_params.ring_base_paddr, ring_params.num_entries);
  1054. if (soc->intr_mode == DP_INTR_MSI) {
  1055. dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
  1056. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1057. FL("Using MSI for ring_type: %d, ring_num %d"),
  1058. ring_type, ring_num);
  1059. } else {
  1060. ring_params.msi_data = 0;
  1061. ring_params.msi_addr = 0;
  1062. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1063. FL("Skipping MSI for ring_type: %d, ring_num %d"),
  1064. ring_type, ring_num);
  1065. }
  1066. /*
  1067. * Setup interrupt timer and batch counter thresholds for
  1068. * interrupt mitigation based on ring type
  1069. */
  1070. if (ring_type == REO_DST) {
  1071. ring_params.intr_timer_thres_us =
  1072. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  1073. ring_params.intr_batch_cntr_thres_entries =
  1074. wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
  1075. } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
  1076. ring_params.intr_timer_thres_us =
  1077. wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
  1078. ring_params.intr_batch_cntr_thres_entries =
  1079. wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
  1080. } else {
  1081. ring_params.intr_timer_thres_us =
  1082. wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
  1083. ring_params.intr_batch_cntr_thres_entries =
  1084. wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
  1085. }
  1086. /* Enable low threshold interrupts for rx buffer rings (regular and
* monitor buffer rings).
  1088. * TODO: See if this is required for any other ring
  1089. */
  1090. if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
  1091. (ring_type == RXDMA_MONITOR_STATUS)) {
  1092. /* TODO: Setting low threshold to 1/8th of ring size
  1093. * see if this needs to be configurable
  1094. */
  1095. ring_params.low_threshold = num_entries >> 3;
  1096. ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
  1097. ring_params.intr_timer_thres_us =
  1098. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  1099. ring_params.intr_batch_cntr_thres_entries = 0;
  1100. }
  1101. srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
  1102. mac_id, &ring_params);
  1103. if (!srng->hal_srng) {
  1104. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1105. srng->alloc_size,
  1106. srng->base_vaddr_unaligned,
  1107. srng->base_paddr_unaligned, 0);
return QDF_STATUS_E_FAILURE;
}
return 0;
  1110. }
  1111. /*
  1112. * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
  1113. * @soc: DP SOC handle
  1114. * @srng: source ring structure
  1115. * @ring_type: type of ring
  1116. * @ring_num: ring number
  1117. *
  1118. * Return: None
  1119. */
  1120. static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
  1121. int ring_type, int ring_num)
  1122. {
  1123. if (!srng->hal_srng) {
  1124. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1125. FL("Ring type: %d, num:%d not setup"),
  1126. ring_type, ring_num);
  1127. return;
  1128. }
  1129. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  1130. srng->hal_srng = NULL;
  1131. }
  1132. /**
  1133. * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
  1134. * Any buffers allocated and attached to ring entries are expected to be freed
  1135. * before calling this function.
  1136. */
  1137. static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
  1138. int ring_type, int ring_num)
  1139. {
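/* On a full detach (dp_soc_reinit not set) release the HAL SRNG context
 * here; the backing DMA memory is freed below in either case.
 */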
  1140. if (!soc->dp_soc_reinit) {
  1141. if (!srng->hal_srng && (srng->alloc_size == 0)) {
  1142. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1143. FL("Ring type: %d, num:%d not setup"),
  1144. ring_type, ring_num);
  1145. return;
  1146. }
  1147. if (srng->hal_srng) {
  1148. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  1149. srng->hal_srng = NULL;
  1150. }
  1151. }
  1152. if (srng->alloc_size) {
  1153. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1154. srng->alloc_size,
  1155. srng->base_vaddr_unaligned,
  1156. srng->base_paddr_unaligned, 0);
  1157. srng->alloc_size = 0;
  1158. }
  1159. }
  1160. /* TODO: Need this interface from HIF */
  1161. void *hif_get_hal_handle(void *hif_handle);
  1162. /*
  1163. * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
* @dp_ctx: DP interrupt context (struct dp_intr) handle
* @dp_budget: Number of frames/descriptors that can be processed in one shot
  1166. *
  1167. * Return: remaining budget/quota for the soc device
  1168. */
  1169. static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
  1170. {
  1171. struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
  1172. struct dp_soc *soc = int_ctx->soc;
  1173. int ring = 0;
  1174. uint32_t work_done = 0;
  1175. int budget = dp_budget;
  1176. uint8_t tx_mask = int_ctx->tx_ring_mask;
  1177. uint8_t rx_mask = int_ctx->rx_ring_mask;
  1178. uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
  1179. uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
  1180. uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
  1181. uint32_t remaining_quota = dp_budget;
  1182. struct dp_pdev *pdev = NULL;
  1183. int mac_id;
  1184. /* Process Tx completion interrupts first to return back buffers */
  1185. while (tx_mask) {
  1186. if (tx_mask & 0x1) {
  1187. work_done = dp_tx_comp_handler(soc,
  1188. soc->tx_comp_ring[ring].hal_srng,
  1189. remaining_quota);
  1190. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1191. "tx mask 0x%x ring %d, budget %d, work_done %d",
  1192. tx_mask, ring, budget, work_done);
  1193. budget -= work_done;
  1194. if (budget <= 0)
  1195. goto budget_done;
  1196. remaining_quota = budget;
  1197. }
  1198. tx_mask = tx_mask >> 1;
  1199. ring++;
  1200. }
  1201. /* Process REO Exception ring interrupt */
  1202. if (rx_err_mask) {
  1203. work_done = dp_rx_err_process(soc,
  1204. soc->reo_exception_ring.hal_srng,
  1205. remaining_quota);
  1206. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1207. "REO Exception Ring: work_done %d budget %d",
  1208. work_done, budget);
  1209. budget -= work_done;
  1210. if (budget <= 0) {
  1211. goto budget_done;
  1212. }
  1213. remaining_quota = budget;
  1214. }
  1215. /* Process Rx WBM release ring interrupt */
  1216. if (rx_wbm_rel_mask) {
  1217. work_done = dp_rx_wbm_err_process(soc,
  1218. soc->rx_rel_ring.hal_srng, remaining_quota);
  1219. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1220. "WBM Release Ring: work_done %d budget %d",
  1221. work_done, budget);
  1222. budget -= work_done;
  1223. if (budget <= 0) {
  1224. goto budget_done;
  1225. }
  1226. remaining_quota = budget;
  1227. }
  1228. /* Process Rx interrupts */
  1229. if (rx_mask) {
  1230. for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
  1231. if (rx_mask & (1 << ring)) {
  1232. work_done = dp_rx_process(int_ctx,
  1233. soc->reo_dest_ring[ring].hal_srng,
  1234. ring,
  1235. remaining_quota);
  1236. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1237. "rx mask 0x%x ring %d, work_done %d budget %d",
  1238. rx_mask, ring, work_done, budget);
  1239. budget -= work_done;
  1240. if (budget <= 0)
  1241. goto budget_done;
  1242. remaining_quota = budget;
  1243. }
  1244. }
  1245. }
  1246. if (reo_status_mask)
  1247. dp_reo_status_ring_handler(soc);
  1248. /* Process LMAC interrupts */
  1249. for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
  1250. pdev = soc->pdev_list[ring];
  1251. if (pdev == NULL)
  1252. continue;
  1253. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  1254. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  1255. pdev->pdev_id);
  1256. if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
  1257. work_done = dp_mon_process(soc, mac_for_pdev,
  1258. remaining_quota);
  1259. budget -= work_done;
  1260. if (budget <= 0)
  1261. goto budget_done;
  1262. remaining_quota = budget;
  1263. }
  1264. if (int_ctx->rxdma2host_ring_mask &
  1265. (1 << mac_for_pdev)) {
  1266. work_done = dp_rxdma_err_process(soc,
  1267. mac_for_pdev,
  1268. remaining_quota);
  1269. budget -= work_done;
  1270. if (budget <= 0)
  1271. goto budget_done;
  1272. remaining_quota = budget;
  1273. }
  1274. if (int_ctx->host2rxdma_ring_mask &
  1275. (1 << mac_for_pdev)) {
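/* host2rxdma (refill) ring raised a low-threshold interrupt:
 * account for it and replenish Rx buffer descriptors for this MAC.
 */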
  1276. union dp_rx_desc_list_elem_t *desc_list = NULL;
  1277. union dp_rx_desc_list_elem_t *tail = NULL;
  1278. struct dp_srng *rx_refill_buf_ring =
  1279. &pdev->rx_refill_buf_ring;
  1280. DP_STATS_INC(pdev, replenish.low_thresh_intrs,
  1281. 1);
  1282. dp_rx_buffers_replenish(soc, mac_for_pdev,
  1283. rx_refill_buf_ring,
  1284. &soc->rx_desc_buf[mac_for_pdev], 0,
  1285. &desc_list, &tail);
  1286. }
  1287. }
  1288. }
  1289. qdf_lro_flush(int_ctx->lro_ctx);
  1290. budget_done:
  1291. return dp_budget - budget;
  1292. }
/* dp_interrupt_timer() - timer poll for interrupts
*
* @arg: SoC Handle
*
* Return: none
  1298. *
  1299. */
  1300. static void dp_interrupt_timer(void *arg)
  1301. {
  1302. struct dp_soc *soc = (struct dp_soc *) arg;
  1303. int i;
  1304. if (qdf_atomic_read(&soc->cmn_init_done)) {
  1305. for (i = 0;
  1306. i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
  1307. dp_service_srngs(&soc->intr_ctx[i], 0xffff);
  1308. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  1309. }
  1310. }
  1311. /*
* dp_soc_attach_poll() - Register handlers for DP rings in poll mode
* @txrx_soc: DP SOC handle
*
* Host driver will set up "DP_NUM_INTERRUPT_CONTEXTS" contexts. Each context
* will have a tx_ring_mask, rx_ring_mask, and rx_monitor_ring mask to
* indicate the rings that are serviced when the poll timer fires.
  1318. *
  1319. * Return: 0 for success, nonzero for failure.
  1320. */
  1321. static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
  1322. {
  1323. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1324. int i;
  1325. soc->intr_mode = DP_INTR_POLL;
  1326. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1327. soc->intr_ctx[i].dp_intr_id = i;
  1328. soc->intr_ctx[i].tx_ring_mask =
  1329. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1330. soc->intr_ctx[i].rx_ring_mask =
  1331. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1332. soc->intr_ctx[i].rx_mon_ring_mask =
  1333. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
  1334. soc->intr_ctx[i].rx_err_ring_mask =
  1335. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1336. soc->intr_ctx[i].rx_wbm_rel_ring_mask =
  1337. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1338. soc->intr_ctx[i].reo_status_ring_mask =
  1339. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1340. soc->intr_ctx[i].rxdma2host_ring_mask =
  1341. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1342. soc->intr_ctx[i].soc = soc;
  1343. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1344. }
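/* In poll mode no per-ring interrupts are registered; once started, the
 * timer initialized below periodically drives dp_service_srngs() across
 * all interrupt contexts.
 */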
  1345. qdf_timer_init(soc->osdev, &soc->int_timer,
  1346. dp_interrupt_timer, (void *)soc,
  1347. QDF_TIMER_TYPE_WAKE_APPS);
  1348. return QDF_STATUS_SUCCESS;
  1349. }
  1350. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
  1351. #if defined(CONFIG_MCL)
  1352. /*
  1353. * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
  1354. * @txrx_soc: DP SOC handle
  1355. *
  1356. * Call the appropriate attach function based on the mode of operation.
  1357. * This is a WAR for enabling monitor mode.
  1358. *
  1359. * Return: 0 for success. nonzero for failure.
  1360. */
  1361. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1362. {
  1363. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1364. if (!(soc->wlan_cfg_ctx->napi_enabled) ||
  1365. con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
  1366. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1367. "%s: Poll mode", __func__);
  1368. return dp_soc_attach_poll(txrx_soc);
  1369. } else {
  1370. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1371. "%s: Interrupt mode", __func__);
  1372. return dp_soc_interrupt_attach(txrx_soc);
  1373. }
  1374. }
  1375. #else
  1376. #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
  1377. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1378. {
  1379. return dp_soc_attach_poll(txrx_soc);
  1380. }
  1381. #else
  1382. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1383. {
  1384. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1385. if (hif_is_polled_mode_enabled(soc->hif_handle))
  1386. return dp_soc_attach_poll(txrx_soc);
  1387. else
  1388. return dp_soc_interrupt_attach(txrx_soc);
  1389. }
  1390. #endif
  1391. #endif
  1392. static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
  1393. int intr_ctx_num, int *irq_id_map, int *num_irq_r)
  1394. {
  1395. int j;
  1396. int num_irq = 0;
  1397. int tx_mask =
  1398. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1399. int rx_mask =
  1400. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1401. int rx_mon_mask =
  1402. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1403. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1404. soc->wlan_cfg_ctx, intr_ctx_num);
  1405. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1406. soc->wlan_cfg_ctx, intr_ctx_num);
  1407. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1408. soc->wlan_cfg_ctx, intr_ctx_num);
  1409. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1410. soc->wlan_cfg_ctx, intr_ctx_num);
  1411. int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
  1412. soc->wlan_cfg_ctx, intr_ctx_num);
  1413. int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
  1414. soc->wlan_cfg_ctx, intr_ctx_num);
  1415. for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
  1416. if (tx_mask & (1 << j)) {
  1417. irq_id_map[num_irq++] =
  1418. (wbm2host_tx_completions_ring1 - j);
  1419. }
  1420. if (rx_mask & (1 << j)) {
  1421. irq_id_map[num_irq++] =
  1422. (reo2host_destination_ring1 - j);
  1423. }
  1424. if (rxdma2host_ring_mask & (1 << j)) {
  1425. irq_id_map[num_irq++] =
  1426. rxdma2host_destination_ring_mac1 -
  1427. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1428. }
  1429. if (host2rxdma_ring_mask & (1 << j)) {
  1430. irq_id_map[num_irq++] =
  1431. host2rxdma_host_buf_ring_mac1 -
  1432. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1433. }
  1434. if (host2rxdma_mon_ring_mask & (1 << j)) {
  1435. irq_id_map[num_irq++] =
  1436. host2rxdma_monitor_ring1 -
  1437. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1438. }
  1439. if (rx_mon_mask & (1 << j)) {
  1440. irq_id_map[num_irq++] =
  1441. ppdu_end_interrupts_mac1 -
  1442. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1443. irq_id_map[num_irq++] =
  1444. rxdma2host_monitor_status_ring_mac1 -
  1445. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1446. }
  1447. if (rx_wbm_rel_ring_mask & (1 << j))
  1448. irq_id_map[num_irq++] = wbm2host_rx_release;
  1449. if (rx_err_ring_mask & (1 << j))
  1450. irq_id_map[num_irq++] = reo2host_exception;
  1451. if (reo_status_ring_mask & (1 << j))
  1452. irq_id_map[num_irq++] = reo2host_status;
  1453. }
  1454. *num_irq_r = num_irq;
  1455. }
  1456. static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
  1457. int intr_ctx_num, int *irq_id_map, int *num_irq_r,
  1458. int msi_vector_count, int msi_vector_start)
  1459. {
  1460. int tx_mask = wlan_cfg_get_tx_ring_mask(
  1461. soc->wlan_cfg_ctx, intr_ctx_num);
  1462. int rx_mask = wlan_cfg_get_rx_ring_mask(
  1463. soc->wlan_cfg_ctx, intr_ctx_num);
  1464. int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
  1465. soc->wlan_cfg_ctx, intr_ctx_num);
  1466. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1467. soc->wlan_cfg_ctx, intr_ctx_num);
  1468. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1469. soc->wlan_cfg_ctx, intr_ctx_num);
  1470. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1471. soc->wlan_cfg_ctx, intr_ctx_num);
  1472. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1473. soc->wlan_cfg_ctx, intr_ctx_num);
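/* All rings handled by this interrupt context share a single MSI vector,
 * assigned round-robin from the vectors granted to the DP module.
 */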
  1474. unsigned int vector =
  1475. (intr_ctx_num % msi_vector_count) + msi_vector_start;
  1476. int num_irq = 0;
  1477. soc->intr_mode = DP_INTR_MSI;
  1478. if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
  1479. rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
  1480. irq_id_map[num_irq++] =
  1481. pld_get_msi_irq(soc->osdev->dev, vector);
  1482. *num_irq_r = num_irq;
  1483. }
  1484. static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
  1485. int *irq_id_map, int *num_irq)
  1486. {
  1487. int msi_vector_count, ret;
  1488. uint32_t msi_base_data, msi_vector_start;
  1489. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  1490. &msi_vector_count,
  1491. &msi_base_data,
  1492. &msi_vector_start);
  1493. if (ret)
  1494. return dp_soc_interrupt_map_calculate_integrated(soc,
  1495. intr_ctx_num, irq_id_map, num_irq);
  1496. else
  1497. dp_soc_interrupt_map_calculate_msi(soc,
  1498. intr_ctx_num, irq_id_map, num_irq,
  1499. msi_vector_count, msi_vector_start);
  1500. }
  1501. /*
  1502. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  1503. * @txrx_soc: DP SOC handle
  1504. *
* Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
* contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
  1507. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  1508. *
  1509. * Return: 0 for success. nonzero for failure.
  1510. */
  1511. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  1512. {
  1513. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1514. int i = 0;
  1515. int num_irq = 0;
  1516. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1517. int ret = 0;
  1518. /* Map of IRQ ids registered with one interrupt context */
  1519. int irq_id_map[HIF_MAX_GRP_IRQ];
  1520. int tx_mask =
  1521. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1522. int rx_mask =
  1523. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1524. int rx_mon_mask =
  1525. dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
  1526. int rx_err_ring_mask =
  1527. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1528. int rx_wbm_rel_ring_mask =
  1529. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1530. int reo_status_ring_mask =
  1531. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1532. int rxdma2host_ring_mask =
  1533. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1534. int host2rxdma_ring_mask =
  1535. wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
  1536. int host2rxdma_mon_ring_mask =
  1537. wlan_cfg_get_host2rxdma_mon_ring_mask(
  1538. soc->wlan_cfg_ctx, i);
  1539. soc->intr_ctx[i].dp_intr_id = i;
  1540. soc->intr_ctx[i].tx_ring_mask = tx_mask;
  1541. soc->intr_ctx[i].rx_ring_mask = rx_mask;
  1542. soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
  1543. soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
  1544. soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
  1545. soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
  1546. soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
  1547. soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
  1548. soc->intr_ctx[i].host2rxdma_mon_ring_mask =
  1549. host2rxdma_mon_ring_mask;
  1550. soc->intr_ctx[i].soc = soc;
  1551. num_irq = 0;
  1552. dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
  1553. &num_irq);
  1554. ret = hif_register_ext_group(soc->hif_handle,
  1555. num_irq, irq_id_map, dp_service_srngs,
  1556. &soc->intr_ctx[i], "dp_intr",
  1557. HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
  1558. if (ret) {
  1559. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1560. FL("failed, ret = %d"), ret);
  1561. return QDF_STATUS_E_FAILURE;
  1562. }
  1563. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1564. }
  1565. hif_configure_ext_group_interrupts(soc->hif_handle);
  1566. return QDF_STATUS_SUCCESS;
  1567. }
  1568. /*
  1569. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  1570. * @txrx_soc: DP SOC handle
  1571. *
  1572. * Return: void
  1573. */
  1574. static void dp_soc_interrupt_detach(void *txrx_soc)
  1575. {
  1576. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1577. int i;
  1578. if (soc->intr_mode == DP_INTR_POLL) {
  1579. qdf_timer_stop(&soc->int_timer);
  1580. qdf_timer_free(&soc->int_timer);
  1581. } else {
  1582. hif_deregister_exec_group(soc->hif_handle, "dp_intr");
  1583. }
  1584. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1585. soc->intr_ctx[i].tx_ring_mask = 0;
  1586. soc->intr_ctx[i].rx_ring_mask = 0;
  1587. soc->intr_ctx[i].rx_mon_ring_mask = 0;
  1588. soc->intr_ctx[i].rx_err_ring_mask = 0;
  1589. soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
  1590. soc->intr_ctx[i].reo_status_ring_mask = 0;
  1591. soc->intr_ctx[i].rxdma2host_ring_mask = 0;
  1592. soc->intr_ctx[i].host2rxdma_ring_mask = 0;
  1593. soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
  1594. qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
  1595. }
  1596. }
  1597. #define AVG_MAX_MPDUS_PER_TID 128
  1598. #define AVG_TIDS_PER_CLIENT 2
  1599. #define AVG_FLOWS_PER_TID 2
  1600. #define AVG_MSDUS_PER_FLOW 128
  1601. #define AVG_MSDUS_PER_MPDU 4
  1602. /*
  1603. * Allocate and setup link descriptor pool that will be used by HW for
  1604. * various link and queue descriptors and managed by WBM
  1605. */
  1606. static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
  1607. {
  1608. int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
  1609. int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
  1610. uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
  1611. uint32_t num_mpdus_per_link_desc =
  1612. hal_num_mpdus_per_link_desc(soc->hal_soc);
  1613. uint32_t num_msdus_per_link_desc =
  1614. hal_num_msdus_per_link_desc(soc->hal_soc);
  1615. uint32_t num_mpdu_links_per_queue_desc =
  1616. hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
  1617. uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
  1618. uint32_t total_link_descs, total_mem_size;
  1619. uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
  1620. uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
  1621. uint32_t num_link_desc_banks;
  1622. uint32_t last_bank_size = 0;
  1623. uint32_t entry_size, num_entries;
  1624. int i;
  1625. uint32_t desc_id = 0;
  1626. qdf_dma_addr_t *baseaddr = NULL;
/* Only Tx queue descriptors are allocated from the common link descriptor
* pool. Rx queue descriptors (REO queue extension descriptors) are not
* included here because they are expected to be allocated contiguously
* with the REO queue descriptors.
  1631. */
  1632. num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1633. AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
  1634. num_mpdu_queue_descs = num_mpdu_link_descs /
  1635. num_mpdu_links_per_queue_desc;
  1636. num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1637. AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
  1638. num_msdus_per_link_desc;
  1639. num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1640. AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
  1641. num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
  1642. num_tx_msdu_link_descs + num_rx_msdu_link_descs;
  1643. /* Round up to power of 2 */
  1644. total_link_descs = 1;
  1645. while (total_link_descs < num_entries)
  1646. total_link_descs <<= 1;
  1647. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1648. FL("total_link_descs: %u, link_desc_size: %d"),
  1649. total_link_descs, link_desc_size);
  1650. total_mem_size = total_link_descs * link_desc_size;
  1651. total_mem_size += link_desc_align;
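/* Split the descriptor pool into banks of at most max_alloc_size bytes;
 * whatever remains (or the whole pool, if it fits) goes into one final,
 * smaller bank.
 */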
  1652. if (total_mem_size <= max_alloc_size) {
  1653. num_link_desc_banks = 0;
  1654. last_bank_size = total_mem_size;
  1655. } else {
  1656. num_link_desc_banks = (total_mem_size) /
  1657. (max_alloc_size - link_desc_align);
  1658. last_bank_size = total_mem_size %
  1659. (max_alloc_size - link_desc_align);
  1660. }
  1661. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1662. FL("total_mem_size: %d, num_link_desc_banks: %u"),
  1663. total_mem_size, num_link_desc_banks);
  1664. for (i = 0; i < num_link_desc_banks; i++) {
  1665. if (!soc->dp_soc_reinit) {
  1666. baseaddr = &soc->link_desc_banks[i].
  1667. base_paddr_unaligned;
  1668. soc->link_desc_banks[i].base_vaddr_unaligned =
  1669. qdf_mem_alloc_consistent(soc->osdev,
  1670. soc->osdev->dev,
  1671. max_alloc_size,
  1672. baseaddr);
  1673. }
  1674. soc->link_desc_banks[i].size = max_alloc_size;
  1675. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
  1676. soc->link_desc_banks[i].base_vaddr_unaligned) +
  1677. ((unsigned long)(
  1678. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1679. link_desc_align));
  1680. soc->link_desc_banks[i].base_paddr = (unsigned long)(
  1681. soc->link_desc_banks[i].base_paddr_unaligned) +
  1682. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1683. (unsigned long)(
  1684. soc->link_desc_banks[i].base_vaddr_unaligned));
  1685. if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
  1686. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1687. FL("Link descriptor memory alloc failed"));
  1688. goto fail;
  1689. }
  1690. }
  1691. if (last_bank_size) {
/* Allocate the last bank in case the total memory required is not an exact
* multiple of max_alloc_size
  1694. */
  1695. if (!soc->dp_soc_reinit) {
  1696. baseaddr = &soc->link_desc_banks[i].
  1697. base_paddr_unaligned;
  1698. soc->link_desc_banks[i].base_vaddr_unaligned =
  1699. qdf_mem_alloc_consistent(soc->osdev,
  1700. soc->osdev->dev,
  1701. last_bank_size,
  1702. baseaddr);
  1703. }
  1704. soc->link_desc_banks[i].size = last_bank_size;
  1705. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
  1706. (soc->link_desc_banks[i].base_vaddr_unaligned) +
  1707. ((unsigned long)(
  1708. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1709. link_desc_align));
  1710. soc->link_desc_banks[i].base_paddr =
  1711. (unsigned long)(
  1712. soc->link_desc_banks[i].base_paddr_unaligned) +
  1713. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1714. (unsigned long)(
  1715. soc->link_desc_banks[i].base_vaddr_unaligned));
  1716. }
  1717. /* Allocate and setup link descriptor idle list for HW internal use */
  1718. entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
  1719. total_mem_size = entry_size * total_link_descs;
  1720. if (total_mem_size <= max_alloc_size) {
  1721. void *desc;
  1722. if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
  1723. WBM_IDLE_LINK, 0, 0, total_link_descs)) {
  1724. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1725. FL("Link desc idle ring setup failed"));
  1726. goto fail;
  1727. }
  1728. hal_srng_access_start_unlocked(soc->hal_soc,
  1729. soc->wbm_idle_link_ring.hal_srng);
  1730. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1731. soc->link_desc_banks[i].base_paddr; i++) {
  1732. uint32_t num_entries = (soc->link_desc_banks[i].size -
  1733. ((unsigned long)(
  1734. soc->link_desc_banks[i].base_vaddr) -
  1735. (unsigned long)(
  1736. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1737. / link_desc_size;
  1738. unsigned long paddr = (unsigned long)(
  1739. soc->link_desc_banks[i].base_paddr);
  1740. while (num_entries && (desc = hal_srng_src_get_next(
  1741. soc->hal_soc,
  1742. soc->wbm_idle_link_ring.hal_srng))) {
  1743. hal_set_link_desc_addr(desc,
  1744. LINK_DESC_COOKIE(desc_id, i), paddr);
  1745. num_entries--;
  1746. desc_id++;
  1747. paddr += link_desc_size;
  1748. }
  1749. }
  1750. hal_srng_access_end_unlocked(soc->hal_soc,
  1751. soc->wbm_idle_link_ring.hal_srng);
  1752. } else {
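/* The idle link list does not fit in a single allocation: fill a set of
 * scatter buffers with the link descriptor addresses and hand them to HW
 * via hal_setup_link_idle_list() instead of populating the WBM_IDLE_LINK
 * ring directly.
 */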
  1753. uint32_t num_scatter_bufs;
  1754. uint32_t num_entries_per_buf;
  1755. uint32_t rem_entries;
  1756. uint8_t *scatter_buf_ptr;
  1757. uint16_t scatter_buf_num;
  1758. uint32_t buf_size = 0;
  1759. soc->wbm_idle_scatter_buf_size =
  1760. hal_idle_list_scatter_buf_size(soc->hal_soc);
  1761. num_entries_per_buf = hal_idle_scatter_buf_num_entries(
  1762. soc->hal_soc, soc->wbm_idle_scatter_buf_size);
  1763. num_scatter_bufs = hal_idle_list_num_scatter_bufs(
  1764. soc->hal_soc, total_mem_size,
  1765. soc->wbm_idle_scatter_buf_size);
  1766. if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
  1767. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1768. FL("scatter bufs size out of bounds"));
  1769. goto fail;
  1770. }
  1771. for (i = 0; i < num_scatter_bufs; i++) {
  1772. baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
  1773. if (!soc->dp_soc_reinit) {
  1774. buf_size = soc->wbm_idle_scatter_buf_size;
  1775. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  1776. qdf_mem_alloc_consistent(soc->osdev,
  1777. soc->osdev->
  1778. dev,
  1779. buf_size,
  1780. baseaddr);
  1781. }
  1782. if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
  1783. QDF_TRACE(QDF_MODULE_ID_DP,
  1784. QDF_TRACE_LEVEL_ERROR,
  1785. FL("Scatter lst memory alloc fail"));
  1786. goto fail;
  1787. }
  1788. }
  1789. /* Populate idle list scatter buffers with link descriptor
  1790. * pointers
  1791. */
  1792. scatter_buf_num = 0;
  1793. scatter_buf_ptr = (uint8_t *)(
  1794. soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
  1795. rem_entries = num_entries_per_buf;
  1796. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1797. soc->link_desc_banks[i].base_paddr; i++) {
  1798. uint32_t num_link_descs =
  1799. (soc->link_desc_banks[i].size -
  1800. ((unsigned long)(
  1801. soc->link_desc_banks[i].base_vaddr) -
  1802. (unsigned long)(
  1803. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1804. / link_desc_size;
  1805. unsigned long paddr = (unsigned long)(
  1806. soc->link_desc_banks[i].base_paddr);
  1807. while (num_link_descs) {
  1808. hal_set_link_desc_addr((void *)scatter_buf_ptr,
  1809. LINK_DESC_COOKIE(desc_id, i), paddr);
  1810. num_link_descs--;
  1811. desc_id++;
  1812. paddr += link_desc_size;
  1813. rem_entries--;
  1814. if (rem_entries) {
  1815. scatter_buf_ptr += entry_size;
  1816. } else {
  1817. rem_entries = num_entries_per_buf;
  1818. scatter_buf_num++;
  1819. if (scatter_buf_num >= num_scatter_bufs)
  1820. break;
  1821. scatter_buf_ptr = (uint8_t *)(
  1822. soc->wbm_idle_scatter_buf_base_vaddr[
  1823. scatter_buf_num]);
  1824. }
  1825. }
  1826. }
  1827. /* Setup link descriptor idle list in HW */
  1828. hal_setup_link_idle_list(soc->hal_soc,
  1829. soc->wbm_idle_scatter_buf_base_paddr,
  1830. soc->wbm_idle_scatter_buf_base_vaddr,
  1831. num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
  1832. (uint32_t)(scatter_buf_ptr -
  1833. (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
  1834. scatter_buf_num-1])), total_link_descs);
  1835. }
  1836. return 0;
  1837. fail:
  1838. if (soc->wbm_idle_link_ring.hal_srng) {
  1839. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1840. WBM_IDLE_LINK, 0);
  1841. }
  1842. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1843. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1844. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1845. soc->wbm_idle_scatter_buf_size,
  1846. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1847. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1848. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  1849. }
  1850. }
  1851. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1852. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1853. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1854. soc->link_desc_banks[i].size,
  1855. soc->link_desc_banks[i].base_vaddr_unaligned,
  1856. soc->link_desc_banks[i].base_paddr_unaligned,
  1857. 0);
  1858. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  1859. }
  1860. }
  1861. return QDF_STATUS_E_FAILURE;
  1862. }
  1863. /*
* Free the link descriptor pool that was set up for HW use
  1865. */
  1866. static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
  1867. {
  1868. int i;
  1869. if (soc->wbm_idle_link_ring.hal_srng) {
  1870. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1871. WBM_IDLE_LINK, 0);
  1872. }
  1873. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1874. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1875. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1876. soc->wbm_idle_scatter_buf_size,
  1877. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1878. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1879. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  1880. }
  1881. }
  1882. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1883. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1884. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1885. soc->link_desc_banks[i].size,
  1886. soc->link_desc_banks[i].base_vaddr_unaligned,
  1887. soc->link_desc_banks[i].base_paddr_unaligned,
  1888. 0);
  1889. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  1890. }
  1891. }
  1892. }
  1893. #ifdef IPA_OFFLOAD
  1894. #define REO_DST_RING_SIZE_QCA6290 1023
  1895. #ifndef QCA_WIFI_QCA8074_VP
  1896. #define REO_DST_RING_SIZE_QCA8074 1023
  1897. #else
  1898. #define REO_DST_RING_SIZE_QCA8074 8
  1899. #endif /* QCA_WIFI_QCA8074_VP */
  1900. #else
  1901. #define REO_DST_RING_SIZE_QCA6290 1024
  1902. #ifndef QCA_WIFI_QCA8074_VP
  1903. #define REO_DST_RING_SIZE_QCA8074 2048
  1904. #else
  1905. #define REO_DST_RING_SIZE_QCA8074 8
  1906. #endif /* QCA_WIFI_QCA8074_VP */
  1907. #endif /* IPA_OFFLOAD */
  1908. /*
  1909. * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
* @soc_hdl: Datapath SOC handle
*
* This is a timer function used to age out stale AST nodes from
* the AST table
  1914. */
  1915. #ifdef FEATURE_WDS
  1916. static void dp_ast_aging_timer_fn(void *soc_hdl)
  1917. {
  1918. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  1919. struct dp_pdev *pdev;
  1920. struct dp_vdev *vdev;
  1921. struct dp_peer *peer;
  1922. struct dp_ast_entry *ase, *temp_ase;
  1923. int i;
  1924. bool check_wds_ase = false;
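/* MEC entries are evaluated on every timer expiry; WDS/DA entries are aged
 * only once every DP_WDS_AST_AGING_TIMER_CNT expirations, i.e. when
 * check_wds_ase is set below.
 */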
  1925. if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
  1926. soc->wds_ast_aging_timer_cnt = 0;
  1927. check_wds_ase = true;
  1928. }
  1929. /* Peer list access lock */
  1930. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1931. /* AST list access lock */
  1932. qdf_spin_lock_bh(&soc->ast_lock);
  1933. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  1934. pdev = soc->pdev_list[i];
  1935. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  1936. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  1937. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  1938. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  1939. /*
  1940. * Do not expire static ast entries
  1941. * and HM WDS entries
  1942. */
  1943. if (ase->type !=
  1944. CDP_TXRX_AST_TYPE_WDS &&
  1945. ase->type !=
  1946. CDP_TXRX_AST_TYPE_MEC &&
  1947. ase->type !=
  1948. CDP_TXRX_AST_TYPE_DA)
  1949. continue;
/* Expire MEC entries every n seconds.
* A MEC entry needs to be expired in
* case the STA backbone is made the
* AP backbone; in that case it needs
* to be re-added as a WDS entry.
  1955. */
  1956. if (ase->is_active && ase->type ==
  1957. CDP_TXRX_AST_TYPE_MEC) {
  1958. ase->is_active = FALSE;
  1959. continue;
  1960. } else if (ase->is_active &&
  1961. check_wds_ase) {
  1962. ase->is_active = FALSE;
  1963. continue;
  1964. }
  1965. if (ase->type ==
  1966. CDP_TXRX_AST_TYPE_MEC) {
  1967. DP_STATS_INC(soc,
  1968. ast.aged_out, 1);
  1969. dp_peer_del_ast(soc, ase);
  1970. } else if (check_wds_ase) {
  1971. DP_STATS_INC(soc,
  1972. ast.aged_out, 1);
  1973. dp_peer_del_ast(soc, ase);
  1974. }
  1975. }
  1976. }
  1977. }
  1978. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1979. }
  1980. qdf_spin_unlock_bh(&soc->ast_lock);
  1981. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1982. if (qdf_atomic_read(&soc->cmn_init_done))
  1983. qdf_timer_mod(&soc->ast_aging_timer,
  1984. DP_AST_AGING_TIMER_DEFAULT_MS);
  1985. }
  1986. /*
* dp_soc_wds_attach() - Setup WDS AST aging timer
  1988. * @soc: Datapath SOC handle
  1989. *
  1990. * Return: None
  1991. */
  1992. static void dp_soc_wds_attach(struct dp_soc *soc)
  1993. {
  1994. soc->wds_ast_aging_timer_cnt = 0;
  1995. qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
  1996. dp_ast_aging_timer_fn, (void *)soc,
  1997. QDF_TIMER_TYPE_WAKE_APPS);
  1998. qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
  1999. }
  2000. /*
  2001. * dp_soc_wds_detach() - Detach WDS data structures and timers
  2002. * @txrx_soc: DP SOC handle
  2003. *
  2004. * Return: None
  2005. */
  2006. static void dp_soc_wds_detach(struct dp_soc *soc)
  2007. {
  2008. qdf_timer_stop(&soc->ast_aging_timer);
  2009. qdf_timer_free(&soc->ast_aging_timer);
  2010. }
  2011. #else
  2012. static void dp_soc_wds_attach(struct dp_soc *soc)
  2013. {
  2014. }
  2015. static void dp_soc_wds_detach(struct dp_soc *soc)
  2016. {
  2017. }
  2018. #endif
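/*
 * Illustrative sketch (not part of dp_main.c): the aging callback above uses
 * a two-pass scheme - an entry that is still active is only marked inactive
 * on one timer pass and is deleted on a later pass if it was not refreshed
 * in between. The standalone C below shows the same mark-then-reap pattern
 * on a simplified table; types and values are invented for illustration.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct ast_entry {
	bool valid;      /* slot in use */
	bool is_active;  /* refreshed since the last aging pass */
};

/* One aging pass: deactivate entries that were active, reap the rest. */
static void age_ast_entries(struct ast_entry *tbl, int n)
{
	for (int i = 0; i < n; i++) {
		if (!tbl[i].valid)
			continue;
		if (tbl[i].is_active) {
			/* first pass after last refresh: only mark */
			tbl[i].is_active = false;
		} else {
			/* still inactive on the next pass: age out */
			tbl[i].valid = false;
		}
	}
}

int main(void)
{
	struct ast_entry tbl[2] = { { true, true }, { true, false } };

	age_ast_entries(tbl, 2);
	printf("entry0 valid=%d entry1 valid=%d\n", tbl[0].valid, tbl[1].valid);
	return 0;
}
#endif /* illustrative sketch */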
  2019. /*
2020. * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
2021. * @soc: Datapath soc handle
2022. *
2023. * This API resets the default cpu ring map
  2024. */
  2025. static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
  2026. {
  2027. uint8_t i;
  2028. int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2029. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  2030. switch (nss_config) {
  2031. case dp_nss_cfg_first_radio:
  2032. /*
2033. * Setting Tx ring map for the first nss offloaded radio
  2034. */
  2035. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
  2036. break;
  2037. case dp_nss_cfg_second_radio:
  2038. /*
2039. * Setting Tx ring map for the second nss offloaded radio
  2040. */
  2041. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
  2042. break;
  2043. case dp_nss_cfg_dbdc:
  2044. /*
  2045. * Setting Tx ring map for 2 nss offloaded radios
  2046. */
  2047. soc->tx_ring_map[i] =
  2048. dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
  2049. break;
  2050. case dp_nss_cfg_dbtc:
  2051. /*
  2052. * Setting Tx ring map for 3 nss offloaded radios
  2053. */
  2054. soc->tx_ring_map[i] =
  2055. dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
  2056. break;
  2057. default:
  2058. dp_err("tx_ring_map failed due to invalid nss cfg");
  2059. break;
  2060. }
  2061. }
  2062. }
  2063. /*
  2064. * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2065. * @soc - DP soc handle
2066. * @ring_type - ring type
2067. * @ring_num - ring number
2068. *
2069. * Return: nonzero if the ring is offloaded to NSS, 0 otherwise
  2070. */
  2071. static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
  2072. {
  2073. uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2074. uint8_t status = 0;
  2075. switch (ring_type) {
  2076. case WBM2SW_RELEASE:
  2077. case REO_DST:
  2078. case RXDMA_BUF:
  2079. status = ((nss_config) & (1 << ring_num));
  2080. break;
  2081. default:
  2082. break;
  2083. }
  2084. return status;
  2085. }
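/*
 * Illustrative sketch (not part of dp_main.c): nss_config is treated above
 * as a bitmap, so "is this ring offloaded to NSS" reduces to testing bit
 * ring_num. A minimal standalone illustration with an assumed config value:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* e.g. dbdc config: radios 0 and 1 offloaded -> bits 0 and 1 set */
	uint8_t nss_config = 0x3;

	for (int ring = 0; ring < 4; ring++)
		printf("ring %d offloaded: %s\n", ring,
		       (nss_config & (1 << ring)) ? "yes" : "no");
	return 0;
}
#endif /* illustrative sketch */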
  2086. /*
  2087. * dp_soc_reset_intr_mask() - reset interrupt mask
2088. * @soc - DP SoC handle
2089. *
2090. * Return: void
  2091. */
  2092. static void dp_soc_reset_intr_mask(struct dp_soc *soc)
  2093. {
  2094. uint8_t j;
  2095. int *grp_mask = NULL;
  2096. int group_number, mask, num_ring;
  2097. /* number of tx ring */
  2098. num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  2099. /*
  2100. * group mask for tx completion ring.
  2101. */
  2102. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  2103. /* loop and reset the mask for only offloaded ring */
  2104. for (j = 0; j < num_ring; j++) {
  2105. if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
  2106. continue;
  2107. }
  2108. /*
  2109. * Group number corresponding to tx offloaded ring.
  2110. */
  2111. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2112. if (group_number < 0) {
  2113. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2114. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2115. WBM2SW_RELEASE, j);
  2116. return;
  2117. }
  2118. /* reset the tx mask for offloaded ring */
  2119. mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2120. mask &= (~(1 << j));
  2121. /*
  2122. * reset the interrupt mask for offloaded ring.
  2123. */
  2124. wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2125. }
  2126. /* number of rx rings */
  2127. num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  2128. /*
  2129. * group mask for reo destination ring.
  2130. */
  2131. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  2132. /* loop and reset the mask for only offloaded ring */
  2133. for (j = 0; j < num_ring; j++) {
  2134. if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
  2135. continue;
  2136. }
  2137. /*
  2138. * Group number corresponding to rx offloaded ring.
  2139. */
  2140. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2141. if (group_number < 0) {
  2142. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2143. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2144. REO_DST, j);
  2145. return;
  2146. }
2147. /* reset the rx interrupt mask for the offloaded ring */
  2148. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2149. mask &= (~(1 << j));
  2150. /*
2151. * clear this ring's bit in the rx interrupt mask for the offloaded radio.
  2152. */
  2153. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2154. }
  2155. /*
  2156. * group mask for Rx buffer refill ring
  2157. */
  2158. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  2159. /* loop and reset the mask for only offloaded ring */
  2160. for (j = 0; j < MAX_PDEV_CNT; j++) {
  2161. if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
  2162. continue;
  2163. }
  2164. /*
  2165. * Group number corresponding to rx offloaded ring.
  2166. */
  2167. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2168. if (group_number < 0) {
  2169. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2170. FL("ring not part of any group; ring_type: %d,ring_num %d"),
2171. RXDMA_BUF, j);
  2172. return;
  2173. }
2174. /* reset the host2rxdma interrupt mask for the offloaded ring */
  2175. mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2176. group_number);
  2177. mask &= (~(1 << j));
  2178. /*
2179. * clear this ring's bit in the host2rxdma interrupt mask for the offloaded radio.
  2180. */
  2181. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2182. group_number, mask);
  2183. }
  2184. }
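/*
 * Illustrative sketch (not part of dp_main.c): each loop above locates the
 * interrupt group that owns an offloaded ring and clears only that ring's
 * bit, leaving the other rings in the group untouched. The bit operation in
 * isolation, with invented values:
 */
#if 0
#include <stdio.h>

int main(void)
{
	int mask = 0x7;  /* rings 0..2 enabled in this interrupt group       */
	int ring = 1;    /* ring 1 is NSS-offloaded, host must not service it */

	mask &= ~(1 << ring);
	printf("mask after reset: 0x%x\n", mask);  /* prints 0x5 */
	return 0;
}
#endif /* illustrative sketch */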
  2185. #ifdef IPA_OFFLOAD
  2186. /**
  2187. * dp_reo_remap_config() - configure reo remap register value based
2188. * on the nss configuration.
  2189. * based on offload_radio value below remap configuration
  2190. * get applied.
  2191. * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
  2192. * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
  2193. * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
  2194. * 3 - both Radios handled by NSS (remap not required)
  2195. * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
  2196. *
  2197. * @remap1: output parameter indicates reo remap 1 register value
  2198. * @remap2: output parameter indicates reo remap 2 register value
  2199. * Return: bool type, true if remap is configured else false.
  2200. */
  2201. static bool dp_reo_remap_config(struct dp_soc *soc,
  2202. uint32_t *remap1,
  2203. uint32_t *remap2)
  2204. {
  2205. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
  2206. (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
  2207. *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
  2208. (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
  2209. dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
  2210. return true;
  2211. }
  2212. #else
  2213. static bool dp_reo_remap_config(struct dp_soc *soc,
  2214. uint32_t *remap1,
  2215. uint32_t *remap2)
  2216. {
  2217. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2218. switch (offload_radio) {
  2219. case dp_nss_cfg_default:
  2220. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  2221. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  2222. (0x3 << 18) | (0x4 << 21)) << 8;
  2223. *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  2224. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  2225. (0x3 << 18) | (0x4 << 21)) << 8;
  2226. break;
  2227. case dp_nss_cfg_first_radio:
  2228. *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
  2229. (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
  2230. (0x2 << 18) | (0x3 << 21)) << 8;
  2231. *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
  2232. (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
  2233. (0x4 << 18) | (0x2 << 21)) << 8;
  2234. break;
  2235. case dp_nss_cfg_second_radio:
  2236. *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
  2237. (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
  2238. (0x1 << 18) | (0x3 << 21)) << 8;
  2239. *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
  2240. (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
  2241. (0x4 << 18) | (0x1 << 21)) << 8;
  2242. break;
  2243. case dp_nss_cfg_dbdc:
  2244. case dp_nss_cfg_dbtc:
  2245. /* return false if both or all are offloaded to NSS */
  2246. return false;
  2247. }
  2248. dp_debug("remap1 %x remap2 %x offload_radio %u",
  2249. *remap1, *remap2, offload_radio);
  2250. return true;
  2251. }
  2252. #endif
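/*
 * Illustrative sketch (not part of dp_main.c): the remap constants above are
 * eight 3-bit REO destination ring ids packed side by side and then shifted
 * up by 8 bits to line up with the register field, exactly as the shifts in
 * the expressions show. For the dp_nss_cfg_default pattern 1,2,3,4,1,2,3,4
 * this packs to 0x8d18d100. Standalone computation of that value:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Pack eight 3-bit REO destination ring ids into one remap value. */
static uint32_t reo_remap_pack(const uint8_t dst[8])
{
	uint32_t val = 0;

	for (int i = 0; i < 8; i++)
		val |= (uint32_t)(dst[i] & 0x7) << (3 * i);
	return val << 8;   /* register field starts at bit 8 */
}

int main(void)
{
	/* dp_nss_cfg_default pattern: rings 1,2,3,4 repeated */
	uint8_t dflt[8] = { 1, 2, 3, 4, 1, 2, 3, 4 };

	printf("remap = 0x%08x\n", (unsigned)reo_remap_pack(dflt)); /* 0x8d18d100 */
	return 0;
}
#endif /* illustrative sketch */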
  2253. /*
  2254. * dp_reo_frag_dst_set() - configure reo register to set the
  2255. * fragment destination ring
  2256. * @soc : Datapath soc
  2257. * @frag_dst_ring : output parameter to set fragment destination ring
  2258. *
2259. * Based on offload_radio, the fragment destination ring is selected as follows:
  2260. * 0 - TCL
  2261. * 1 - SW1
  2262. * 2 - SW2
  2263. * 3 - SW3
  2264. * 4 - SW4
  2265. * 5 - Release
  2266. * 6 - FW
  2267. * 7 - alternate select
  2268. *
  2269. * return: void
  2270. */
  2271. static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
  2272. {
  2273. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2274. switch (offload_radio) {
  2275. case dp_nss_cfg_default:
  2276. *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
  2277. break;
  2278. case dp_nss_cfg_dbdc:
  2279. case dp_nss_cfg_dbtc:
  2280. *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
  2281. break;
  2282. default:
  2283. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2284. FL("dp_reo_frag_dst_set invalid offload radio config"));
  2285. break;
  2286. }
  2287. }
  2288. /*
2289. * dp_soc_cmn_setup() - Common SoC level initialization
  2290. * @soc: Datapath SOC handle
  2291. *
  2292. * This is an internal function used to setup common SOC data structures,
  2293. * to be called from PDEV attach after receiving HW mode capabilities from FW
  2294. */
  2295. static int dp_soc_cmn_setup(struct dp_soc *soc)
  2296. {
  2297. int i;
  2298. struct hal_reo_params reo_params;
  2299. int tx_ring_size;
  2300. int tx_comp_ring_size;
  2301. int reo_dst_ring_size;
  2302. uint32_t entries;
  2303. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2304. if (qdf_atomic_read(&soc->cmn_init_done))
  2305. return 0;
  2306. if (dp_hw_link_desc_pool_setup(soc))
  2307. goto fail1;
  2308. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2309. /* Setup SRNG rings */
  2310. /* Common rings */
  2311. if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
  2312. wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
  2313. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2314. FL("dp_srng_setup failed for wbm_desc_rel_ring"));
  2315. goto fail1;
  2316. }
  2317. soc->num_tcl_data_rings = 0;
  2318. /* Tx data rings */
  2319. if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
  2320. soc->num_tcl_data_rings =
  2321. wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
  2322. tx_comp_ring_size =
  2323. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2324. tx_ring_size =
  2325. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2326. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  2327. if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
  2328. TCL_DATA, i, 0, tx_ring_size)) {
  2329. QDF_TRACE(QDF_MODULE_ID_DP,
  2330. QDF_TRACE_LEVEL_ERROR,
  2331. FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
  2332. goto fail1;
  2333. }
  2334. /*
  2335. * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
  2336. * count
  2337. */
  2338. if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
  2339. WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
  2340. QDF_TRACE(QDF_MODULE_ID_DP,
  2341. QDF_TRACE_LEVEL_ERROR,
  2342. FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
  2343. goto fail1;
  2344. }
  2345. }
  2346. } else {
  2347. /* This will be incremented during per pdev ring setup */
  2348. soc->num_tcl_data_rings = 0;
  2349. }
  2350. if (dp_tx_soc_attach(soc)) {
  2351. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2352. FL("dp_tx_soc_attach failed"));
  2353. goto fail1;
  2354. }
  2355. entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
  2356. /* TCL command and status rings */
  2357. if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
  2358. entries)) {
  2359. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2360. FL("dp_srng_setup failed for tcl_cmd_ring"));
  2361. goto fail1;
  2362. }
  2363. entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
  2364. if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
  2365. entries)) {
  2366. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2367. FL("dp_srng_setup failed for tcl_status_ring"));
  2368. goto fail1;
  2369. }
  2370. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2371. /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
  2372. * descriptors
  2373. */
  2374. /* Rx data rings */
  2375. if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2376. soc->num_reo_dest_rings =
  2377. wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
  2378. QDF_TRACE(QDF_MODULE_ID_DP,
  2379. QDF_TRACE_LEVEL_INFO,
  2380. FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
  2381. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  2382. if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
  2383. i, 0, reo_dst_ring_size)) {
  2384. QDF_TRACE(QDF_MODULE_ID_DP,
  2385. QDF_TRACE_LEVEL_ERROR,
  2386. FL(RNG_ERR "reo_dest_ring [%d]"), i);
  2387. goto fail1;
  2388. }
  2389. }
  2390. } else {
  2391. /* This will be incremented during per pdev ring setup */
  2392. soc->num_reo_dest_rings = 0;
  2393. }
  2394. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2395. /* LMAC RxDMA to SW Rings configuration */
  2396. if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
  2397. /* Only valid for MCL */
  2398. struct dp_pdev *pdev = soc->pdev_list[0];
  2399. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
  2400. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
  2401. RXDMA_DST, 0, i,
  2402. entries)) {
  2403. QDF_TRACE(QDF_MODULE_ID_DP,
  2404. QDF_TRACE_LEVEL_ERROR,
  2405. FL(RNG_ERR "rxdma_err_dst_ring"));
  2406. goto fail1;
  2407. }
  2408. }
  2409. }
  2410. /* TBD: call dp_rx_init to setup Rx SW descriptors */
  2411. /* REO reinjection ring */
  2412. entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
  2413. if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
  2414. entries)) {
  2415. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2416. FL("dp_srng_setup failed for reo_reinject_ring"));
  2417. goto fail1;
  2418. }
  2419. /* Rx release ring */
  2420. if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
  2421. wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
  2422. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2423. FL("dp_srng_setup failed for rx_rel_ring"));
  2424. goto fail1;
  2425. }
  2426. /* Rx exception ring */
  2427. entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
  2428. if (dp_srng_setup(soc, &soc->reo_exception_ring,
  2429. REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
  2430. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2431. FL("dp_srng_setup failed for reo_exception_ring"));
  2432. goto fail1;
  2433. }
  2434. /* REO command and status rings */
  2435. if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
  2436. wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
  2437. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2438. FL("dp_srng_setup failed for reo_cmd_ring"));
  2439. goto fail1;
  2440. }
  2441. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  2442. TAILQ_INIT(&soc->rx.reo_cmd_list);
  2443. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  2444. if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
  2445. wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
  2446. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2447. FL("dp_srng_setup failed for reo_status_ring"));
  2448. goto fail1;
  2449. }
  2450. /* Reset the cpu ring map if radio is NSS offloaded */
  2451. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
  2452. dp_soc_reset_cpu_ring_map(soc);
  2453. dp_soc_reset_intr_mask(soc);
  2454. }
  2455. /* Setup HW REO */
  2456. qdf_mem_zero(&reo_params, sizeof(reo_params));
  2457. if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
  2458. /*
  2459. * Reo ring remap is not required if both radios
  2460. * are offloaded to NSS
  2461. */
  2462. if (!dp_reo_remap_config(soc,
  2463. &reo_params.remap1,
  2464. &reo_params.remap2))
  2465. goto out;
  2466. reo_params.rx_hash_enabled = true;
  2467. }
  2468. /* setup the global rx defrag waitlist */
  2469. TAILQ_INIT(&soc->rx.defrag.waitlist);
  2470. soc->rx.defrag.timeout_ms =
  2471. wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
  2472. soc->rx.flags.defrag_timeout_check =
  2473. wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
  2474. qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
  2475. out:
  2476. /*
  2477. * set the fragment destination ring
  2478. */
  2479. dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
  2480. hal_reo_setup(soc->hal_soc, &reo_params);
  2481. qdf_atomic_set(&soc->cmn_init_done, 1);
  2482. dp_soc_wds_attach(soc);
  2483. qdf_nbuf_queue_init(&soc->htt_stats.msg);
  2484. return 0;
  2485. fail1:
  2486. /*
  2487. * Cleanup will be done as part of soc_detach, which will
  2488. * be called on pdev attach failure
  2489. */
  2490. return QDF_STATUS_E_FAILURE;
  2491. }
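/*
 * Illustrative sketch (not part of dp_main.c): dp_soc_cmn_setup() funnels
 * every setup failure to the single fail1 label and, as its comment notes,
 * defers the actual cleanup to soc_detach. A generic standalone example of
 * the single-error-label style it relies on (resources and names invented):
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static int setup_everything(void)
{
	char *a = NULL, *b = NULL;

	a = malloc(32);
	if (!a)
		goto fail;
	b = malloc(64);
	if (!b)
		goto fail;

	printf("setup ok\n");
	free(b);
	free(a);
	return 0;

fail:
	/* Single exit point: release whatever was acquired before the error. */
	free(b);
	free(a);
	return -1;
}

int main(void)
{
	return setup_everything() ? EXIT_FAILURE : EXIT_SUCCESS;
}
#endif /* illustrative sketch */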
  2492. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
  2493. static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2494. {
  2495. struct cdp_lro_hash_config lro_hash;
  2496. QDF_STATUS status;
  2497. if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  2498. !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
  2499. !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  2500. dp_err("LRO, GRO and RX hash disabled");
  2501. return QDF_STATUS_E_FAILURE;
  2502. }
  2503. qdf_mem_zero(&lro_hash, sizeof(lro_hash));
  2504. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
  2505. wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
  2506. lro_hash.lro_enable = 1;
  2507. lro_hash.tcp_flag = QDF_TCPHDR_ACK;
  2508. lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
  2509. QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
  2510. QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
  2511. }
  2512. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
  2513. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2514. LRO_IPV4_SEED_ARR_SZ));
  2515. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
  2516. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2517. LRO_IPV6_SEED_ARR_SZ));
  2518. qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
  2519. if (!soc->cdp_soc.ol_ops->lro_hash_config) {
  2520. QDF_BUG(0);
  2521. dp_err("lro_hash_config not configured");
  2522. return QDF_STATUS_E_FAILURE;
  2523. }
  2524. status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
  2525. &lro_hash);
  2526. if (!QDF_IS_STATUS_SUCCESS(status)) {
  2527. dp_err("failed to send lro_hash_config to FW %u", status);
  2528. return status;
  2529. }
  2530. dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
  2531. lro_hash.lro_enable, lro_hash.tcp_flag,
  2532. lro_hash.tcp_flag_mask);
  2533. dp_info("toeplitz_hash_ipv4:");
  2534. qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2535. (void *)lro_hash.toeplitz_hash_ipv4,
  2536. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2537. LRO_IPV4_SEED_ARR_SZ));
  2538. dp_info("toeplitz_hash_ipv6:");
  2539. qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2540. (void *)lro_hash.toeplitz_hash_ipv6,
  2541. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2542. LRO_IPV6_SEED_ARR_SZ));
  2543. return status;
  2544. }
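/*
 * Illustrative sketch (not part of dp_main.c): the LRO/GRO config above sets
 * tcp_flag to ACK and tcp_flag_mask to every TCP flag except PSH. Under the
 * common interpretation (an assumption here, the FW-side semantics are not
 * shown in this file), a segment qualifies when the flag bits selected by the
 * mask equal tcp_flag, i.e. a pure ACK, optionally carrying PSH. The TCP_*
 * values below stand in for the QDF_TCPHDR_* flags and use the standard TCP
 * flag bit positions.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TCP_FIN 0x01
#define TCP_SYN 0x02
#define TCP_RST 0x04
#define TCP_PSH 0x08
#define TCP_ACK 0x10
#define TCP_URG 0x20
#define TCP_ECE 0x40
#define TCP_CWR 0x80

int main(void)
{
	uint8_t flag      = TCP_ACK;  /* expected flag value */
	uint8_t flag_mask = TCP_FIN | TCP_SYN | TCP_RST | TCP_ACK |
			    TCP_URG | TCP_ECE | TCP_CWR;  /* bits checked */

	uint8_t pure_ack = TCP_ACK;
	uint8_t syn_ack  = TCP_SYN | TCP_ACK;

	printf("pure ACK eligible: %d\n", (pure_ack & flag_mask) == flag);
	printf("SYN-ACK  eligible: %d\n", (syn_ack & flag_mask) == flag);
	return 0;
}
#endif /* illustrative sketch */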
  2545. /*
  2546. * dp_rxdma_ring_setup() - configure the RX DMA rings
  2547. * @soc: data path SoC handle
  2548. * @pdev: Physical device handle
  2549. *
  2550. * Return: 0 - success, > 0 - failure
  2551. */
  2552. #ifdef QCA_HOST2FW_RXBUF_RING
  2553. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2554. struct dp_pdev *pdev)
  2555. {
  2556. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2557. int max_mac_rings;
  2558. int i;
  2559. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2560. max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
  2561. for (i = 0; i < max_mac_rings; i++) {
  2562. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2563. "%s: pdev_id %d mac_id %d",
  2564. __func__, pdev->pdev_id, i);
  2565. if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
  2566. RXDMA_BUF, 1, i,
  2567. wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
  2568. QDF_TRACE(QDF_MODULE_ID_DP,
  2569. QDF_TRACE_LEVEL_ERROR,
  2570. FL("failed rx mac ring setup"));
  2571. return QDF_STATUS_E_FAILURE;
  2572. }
  2573. }
  2574. return QDF_STATUS_SUCCESS;
  2575. }
  2576. #else
  2577. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2578. struct dp_pdev *pdev)
  2579. {
  2580. return QDF_STATUS_SUCCESS;
  2581. }
  2582. #endif
  2583. /**
  2584. * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
  2585. * @pdev - DP_PDEV handle
  2586. *
  2587. * Return: void
  2588. */
  2589. static inline void
  2590. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  2591. {
  2592. uint8_t map_id;
  2593. struct dp_soc *soc = pdev->soc;
  2594. if (!soc)
  2595. return;
  2596. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  2597. qdf_mem_copy(pdev->dscp_tid_map[map_id],
  2598. default_dscp_tid_map,
  2599. sizeof(default_dscp_tid_map));
  2600. }
  2601. for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
  2602. hal_tx_set_dscp_tid_map(soc->hal_soc,
  2603. default_dscp_tid_map,
  2604. map_id);
  2605. }
  2606. }
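/*
 * Illustrative sketch (not part of dp_main.c): dp_dscp_tid_map_setup() copies
 * one default DSCP-to-TID lookup table into every map id and programs the
 * same table into HW. The sketch below mimics that copy; the "dscp >> 3"
 * default mapping is a hypothetical stand-in, not the driver's actual
 * default_dscp_tid_map contents.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DSCP_MAX   64   /* 6-bit DSCP field */
#define NUM_MAPS    2   /* stand-in for DP_MAX_TID_MAPS */

int main(void)
{
	uint8_t default_map[DSCP_MAX];
	uint8_t maps[NUM_MAPS][DSCP_MAX];

	/* Hypothetical default: one TID per group of 8 DSCP values. */
	for (int dscp = 0; dscp < DSCP_MAX; dscp++)
		default_map[dscp] = dscp >> 3;

	/* Every map id starts out as a copy of the default table. */
	for (int id = 0; id < NUM_MAPS; id++)
		memcpy(maps[id], default_map, sizeof(default_map));

	printf("DSCP 46 (EF) -> TID %u\n", maps[0][46]);  /* prints 5 */
	return 0;
}
#endif /* illustrative sketch */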
  2607. #ifdef IPA_OFFLOAD
  2608. /**
  2609. * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
  2610. * @soc: data path instance
  2611. * @pdev: core txrx pdev context
  2612. *
  2613. * Return: QDF_STATUS_SUCCESS: success
2614. * QDF_STATUS_E_FAILURE: Error return
  2615. */
  2616. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2617. struct dp_pdev *pdev)
  2618. {
  2619. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2620. int entries;
  2621. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2622. entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
  2623. /* Setup second Rx refill buffer ring */
  2624. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2625. IPA_RX_REFILL_BUF_RING_IDX,
  2626. pdev->pdev_id,
  2627. entries)) {
  2628. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2629. FL("dp_srng_setup failed second rx refill ring"));
  2630. return QDF_STATUS_E_FAILURE;
  2631. }
  2632. return QDF_STATUS_SUCCESS;
  2633. }
  2634. /**
  2635. * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
  2636. * @soc: data path instance
  2637. * @pdev: core txrx pdev context
  2638. *
  2639. * Return: void
  2640. */
  2641. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2642. struct dp_pdev *pdev)
  2643. {
  2644. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2645. IPA_RX_REFILL_BUF_RING_IDX);
  2646. }
  2647. #else
  2648. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2649. struct dp_pdev *pdev)
  2650. {
  2651. return QDF_STATUS_SUCCESS;
  2652. }
  2653. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2654. struct dp_pdev *pdev)
  2655. {
  2656. }
  2657. #endif
  2658. #if !defined(DISABLE_MON_CONFIG)
  2659. /**
  2660. * dp_mon_rings_setup() - Initialize Monitor rings based on target
  2661. * @soc: soc handle
  2662. * @pdev: physical device handle
  2663. *
  2664. * Return: nonzero on failure and zero on success
  2665. */
  2666. static
  2667. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2668. {
  2669. int mac_id = 0;
  2670. int pdev_id = pdev->pdev_id;
  2671. int entries;
  2672. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2673. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2674. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  2675. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  2676. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  2677. entries =
  2678. wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
  2679. if (dp_srng_setup(soc,
  2680. &pdev->rxdma_mon_buf_ring[mac_id],
  2681. RXDMA_MONITOR_BUF, 0, mac_for_pdev,
  2682. entries)) {
  2683. QDF_TRACE(QDF_MODULE_ID_DP,
  2684. QDF_TRACE_LEVEL_ERROR,
  2685. FL(RNG_ERR "rxdma_mon_buf_ring "));
  2686. return QDF_STATUS_E_NOMEM;
  2687. }
  2688. entries =
  2689. wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
  2690. if (dp_srng_setup(soc,
  2691. &pdev->rxdma_mon_dst_ring[mac_id],
  2692. RXDMA_MONITOR_DST, 0, mac_for_pdev,
  2693. entries)) {
  2694. QDF_TRACE(QDF_MODULE_ID_DP,
  2695. QDF_TRACE_LEVEL_ERROR,
  2696. FL(RNG_ERR "rxdma_mon_dst_ring"));
  2697. return QDF_STATUS_E_NOMEM;
  2698. }
  2699. entries =
  2700. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2701. if (dp_srng_setup(soc,
  2702. &pdev->rxdma_mon_status_ring[mac_id],
  2703. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2704. entries)) {
  2705. QDF_TRACE(QDF_MODULE_ID_DP,
  2706. QDF_TRACE_LEVEL_ERROR,
  2707. FL(RNG_ERR "rxdma_mon_status_ring"));
  2708. return QDF_STATUS_E_NOMEM;
  2709. }
  2710. entries =
  2711. wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
  2712. if (dp_srng_setup(soc,
  2713. &pdev->rxdma_mon_desc_ring[mac_id],
  2714. RXDMA_MONITOR_DESC, 0, mac_for_pdev,
  2715. entries)) {
  2716. QDF_TRACE(QDF_MODULE_ID_DP,
  2717. QDF_TRACE_LEVEL_ERROR,
  2718. FL(RNG_ERR "rxdma_mon_desc_ring"));
  2719. return QDF_STATUS_E_NOMEM;
  2720. }
  2721. } else {
  2722. entries =
  2723. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2724. if (dp_srng_setup(soc,
  2725. &pdev->rxdma_mon_status_ring[mac_id],
  2726. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2727. entries)) {
  2728. QDF_TRACE(QDF_MODULE_ID_DP,
  2729. QDF_TRACE_LEVEL_ERROR,
  2730. FL(RNG_ERR "rxdma_mon_status_ring"));
  2731. return QDF_STATUS_E_NOMEM;
  2732. }
  2733. }
  2734. }
  2735. return QDF_STATUS_SUCCESS;
  2736. }
  2737. #else
  2738. static
  2739. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2740. {
  2741. return QDF_STATUS_SUCCESS;
  2742. }
  2743. #endif
2744. /* dp_iterate_update_peer_list() - update peer stats on cal client timer
  2745. * @pdev_hdl: pdev handle
  2746. */
  2747. #ifdef ATH_SUPPORT_EXT_STAT
  2748. void dp_iterate_update_peer_list(void *pdev_hdl)
  2749. {
  2750. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  2751. struct dp_soc *soc = pdev->soc;
  2752. struct dp_vdev *vdev = NULL;
  2753. struct dp_peer *peer = NULL;
  2754. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  2755. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  2756. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  2757. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  2758. dp_cal_client_update_peer_stats(&peer->stats);
  2759. }
  2760. }
  2761. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  2762. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2763. }
  2764. #else
  2765. void dp_iterate_update_peer_list(void *pdev_hdl)
  2766. {
  2767. }
  2768. #endif
  2769. /*
  2770. * dp_pdev_attach_wifi3() - attach txrx pdev
2771. * @txrx_soc: Datapath SOC handle
2772. * @ctrl_pdev: Opaque PDEV object
  2773. * @htc_handle: HTC handle for host-target interface
  2774. * @qdf_osdev: QDF OS device
  2775. * @pdev_id: PDEV ID
  2776. *
  2777. * Return: DP PDEV handle on success, NULL on failure
  2778. */
  2779. static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
  2780. struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
  2781. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
  2782. {
  2783. int tx_ring_size;
  2784. int tx_comp_ring_size;
  2785. int reo_dst_ring_size;
  2786. int entries;
  2787. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2788. int nss_cfg;
  2789. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  2790. struct dp_pdev *pdev = NULL;
  2791. if (soc->dp_soc_reinit)
  2792. pdev = soc->pdev_list[pdev_id];
  2793. else
  2794. pdev = qdf_mem_malloc(sizeof(*pdev));
  2795. if (!pdev) {
  2796. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2797. FL("DP PDEV memory allocation failed"));
  2798. goto fail0;
  2799. }
  2800. /*
  2801. * Variable to prevent double pdev deinitialization during
2802. * radio detach execution, i.e. in the absence of any vdev.
  2803. */
  2804. pdev->pdev_deinit = 0;
  2805. pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
  2806. if (!pdev->invalid_peer) {
  2807. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2808. FL("Invalid peer memory allocation failed"));
  2809. qdf_mem_free(pdev);
  2810. goto fail0;
  2811. }
  2812. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2813. pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
  2814. if (!pdev->wlan_cfg_ctx) {
  2815. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2816. FL("pdev cfg_attach failed"));
  2817. qdf_mem_free(pdev->invalid_peer);
  2818. qdf_mem_free(pdev);
  2819. goto fail0;
  2820. }
  2821. /*
  2822. * set nss pdev config based on soc config
  2823. */
  2824. nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
  2825. wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
  2826. (nss_cfg & (1 << pdev_id)));
  2827. pdev->soc = soc;
  2828. pdev->ctrl_pdev = ctrl_pdev;
  2829. pdev->pdev_id = pdev_id;
  2830. soc->pdev_list[pdev_id] = pdev;
  2831. pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
  2832. soc->pdev_count++;
  2833. TAILQ_INIT(&pdev->vdev_list);
  2834. qdf_spinlock_create(&pdev->vdev_list_lock);
  2835. pdev->vdev_count = 0;
  2836. qdf_spinlock_create(&pdev->tx_mutex);
  2837. qdf_spinlock_create(&pdev->neighbour_peer_mutex);
  2838. TAILQ_INIT(&pdev->neighbour_peers_list);
  2839. pdev->neighbour_peers_added = false;
  2840. pdev->monitor_configured = false;
  2841. if (dp_soc_cmn_setup(soc)) {
  2842. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2843. FL("dp_soc_cmn_setup failed"));
  2844. goto fail1;
  2845. }
  2846. /* Setup per PDEV TCL rings if configured */
  2847. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  2848. tx_ring_size =
  2849. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2850. tx_comp_ring_size =
  2851. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2852. if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
  2853. pdev_id, pdev_id, tx_ring_size)) {
  2854. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2855. FL("dp_srng_setup failed for tcl_data_ring"));
  2856. goto fail1;
  2857. }
  2858. if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
  2859. WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
  2860. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2861. FL("dp_srng_setup failed for tx_comp_ring"));
  2862. goto fail1;
  2863. }
  2864. soc->num_tcl_data_rings++;
  2865. }
  2866. /* Tx specific init */
  2867. if (dp_tx_pdev_attach(pdev)) {
  2868. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2869. FL("dp_tx_pdev_attach failed"));
  2870. goto fail1;
  2871. }
  2872. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2873. /* Setup per PDEV REO rings if configured */
  2874. if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2875. if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
  2876. pdev_id, pdev_id, reo_dst_ring_size)) {
  2877. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2878. FL("dp_srng_setup failed for reo_dest_ring"));
  2879. goto fail1;
  2880. }
  2881. soc->num_reo_dest_rings++;
  2882. }
  2883. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
  2884. wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
  2885. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2886. FL("dp_srng_setup failed rx refill ring"));
  2887. goto fail1;
  2888. }
  2889. if (dp_rxdma_ring_setup(soc, pdev)) {
  2890. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2891. FL("RXDMA ring config failed"));
  2892. goto fail1;
  2893. }
  2894. if (dp_mon_rings_setup(soc, pdev)) {
  2895. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2896. FL("MONITOR rings setup failed"));
  2897. goto fail1;
  2898. }
  2899. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2900. if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
  2901. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
  2902. 0, pdev_id,
  2903. entries)) {
  2904. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2905. FL(RNG_ERR "rxdma_err_dst_ring"));
  2906. goto fail1;
  2907. }
  2908. }
  2909. if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
  2910. goto fail1;
  2911. if (dp_ipa_ring_resource_setup(soc, pdev))
  2912. goto fail1;
  2913. if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
  2914. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2915. FL("dp_ipa_uc_attach failed"));
  2916. goto fail1;
  2917. }
  2918. /* Rx specific init */
  2919. if (dp_rx_pdev_attach(pdev)) {
  2920. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2921. FL("dp_rx_pdev_attach failed"));
  2922. goto fail1;
  2923. }
  2924. DP_STATS_INIT(pdev);
  2925. /* Monitor filter init */
  2926. pdev->mon_filter_mode = MON_FILTER_ALL;
  2927. pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
  2928. pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
  2929. pdev->fp_data_filter = FILTER_DATA_ALL;
  2930. pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
  2931. pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
  2932. pdev->mo_data_filter = FILTER_DATA_ALL;
  2933. dp_local_peer_id_pool_init(pdev);
  2934. dp_dscp_tid_map_setup(pdev);
  2935. /* Rx monitor mode specific init */
  2936. if (dp_rx_pdev_mon_attach(pdev)) {
  2937. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2938. "dp_rx_pdev_mon_attach failed");
  2939. goto fail1;
  2940. }
  2941. if (dp_wdi_event_attach(pdev)) {
  2942. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2943. "dp_wdi_event_attach failed");
  2944. goto fail1;
  2945. }
  2946. /* set the reo destination during initialization */
  2947. pdev->reo_dest = pdev->pdev_id + 1;
  2948. /*
  2949. * initialize ppdu tlv list
  2950. */
  2951. TAILQ_INIT(&pdev->ppdu_info_list);
  2952. pdev->tlv_count = 0;
  2953. pdev->list_depth = 0;
  2954. qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
  2955. pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
  2956. sizeof(struct cdp_tx_sojourn_stats), 0, 4,
  2957. TRUE);
2958. /* initialize cal client timer */
  2959. dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
  2960. &dp_iterate_update_peer_list);
  2961. qdf_event_create(&pdev->fw_peer_stats_event);
  2962. return (struct cdp_pdev *)pdev;
  2963. fail1:
  2964. dp_pdev_detach((struct cdp_pdev *)pdev, 0);
  2965. fail0:
  2966. return NULL;
  2967. }
  2968. /*
2969. * dp_rxdma_ring_cleanup() - cleanup the RX DMA rings
  2970. * @soc: data path SoC handle
  2971. * @pdev: Physical device handle
  2972. *
  2973. * Return: void
  2974. */
  2975. #ifdef QCA_HOST2FW_RXBUF_RING
  2976. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2977. struct dp_pdev *pdev)
  2978. {
  2979. int max_mac_rings =
  2980. wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
  2981. int i;
  2982. max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
  2983. max_mac_rings : MAX_RX_MAC_RINGS;
  2984. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  2985. dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
  2986. RXDMA_BUF, 1);
  2987. qdf_timer_free(&soc->mon_reap_timer);
  2988. }
  2989. #else
  2990. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2991. struct dp_pdev *pdev)
  2992. {
  2993. }
  2994. #endif
  2995. /*
  2996. * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
  2997. * @pdev: device object
  2998. *
  2999. * Return: void
  3000. */
  3001. static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  3002. {
  3003. struct dp_neighbour_peer *peer = NULL;
  3004. struct dp_neighbour_peer *temp_peer = NULL;
  3005. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  3006. neighbour_peer_list_elem, temp_peer) {
  3007. /* delete this peer from the list */
  3008. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  3009. peer, neighbour_peer_list_elem);
  3010. qdf_mem_free(peer);
  3011. }
  3012. qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
  3013. }
  3014. /**
  3015. * dp_htt_ppdu_stats_detach() - detach stats resources
  3016. * @pdev: Datapath PDEV handle
  3017. *
  3018. * Return: void
  3019. */
  3020. static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
  3021. {
  3022. struct ppdu_info *ppdu_info, *ppdu_info_next;
  3023. TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
  3024. ppdu_info_list_elem, ppdu_info_next) {
  3025. if (!ppdu_info)
  3026. break;
  3027. qdf_assert_always(ppdu_info->nbuf);
  3028. qdf_nbuf_free(ppdu_info->nbuf);
  3029. qdf_mem_free(ppdu_info);
  3030. }
  3031. }
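/*
 * Illustrative sketch (not part of dp_main.c): dp_htt_ppdu_stats_detach() and
 * dp_neighbour_peers_detach() above rely on TAILQ_FOREACH_SAFE, which caches
 * the next element so the current one can be removed and freed inside the
 * loop. Standalone example; it provides the _SAFE macro in case the system
 * <sys/queue.h> lacks it.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)              \
	for ((var) = TAILQ_FIRST((head));                        \
	     (var) && ((tvar) = TAILQ_NEXT((var), field), 1);    \
	     (var) = (tvar))
#endif

struct node {
	int val;
	TAILQ_ENTRY(node) link;
};

TAILQ_HEAD(node_list, node);

int main(void)
{
	struct node_list list = TAILQ_HEAD_INITIALIZER(list);
	struct node *n, *next;

	for (int i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->val = i;
		TAILQ_INSERT_TAIL(&list, n, link);
	}

	/* The _SAFE variant caches the next pointer, so freeing n is legal. */
	TAILQ_FOREACH_SAFE(n, &list, link, next) {
		printf("freeing %d\n", n->val);
		TAILQ_REMOVE(&list, n, link);
		free(n);
	}
	return 0;
}
#endif /* illustrative sketch */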
  3032. #if !defined(DISABLE_MON_CONFIG)
  3033. static
  3034. void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  3035. int mac_id)
  3036. {
  3037. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  3038. dp_srng_cleanup(soc,
  3039. &pdev->rxdma_mon_buf_ring[mac_id],
  3040. RXDMA_MONITOR_BUF, 0);
  3041. dp_srng_cleanup(soc,
  3042. &pdev->rxdma_mon_dst_ring[mac_id],
  3043. RXDMA_MONITOR_DST, 0);
  3044. dp_srng_cleanup(soc,
  3045. &pdev->rxdma_mon_status_ring[mac_id],
  3046. RXDMA_MONITOR_STATUS, 0);
  3047. dp_srng_cleanup(soc,
  3048. &pdev->rxdma_mon_desc_ring[mac_id],
  3049. RXDMA_MONITOR_DESC, 0);
  3050. dp_srng_cleanup(soc,
  3051. &pdev->rxdma_err_dst_ring[mac_id],
  3052. RXDMA_DST, 0);
  3053. } else {
  3054. dp_srng_cleanup(soc,
  3055. &pdev->rxdma_mon_status_ring[mac_id],
  3056. RXDMA_MONITOR_STATUS, 0);
  3057. dp_srng_cleanup(soc,
  3058. &pdev->rxdma_err_dst_ring[mac_id],
  3059. RXDMA_DST, 0);
  3060. }
  3061. }
  3062. #else
  3063. static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  3064. int mac_id)
  3065. {
  3066. }
  3067. #endif
  3068. /**
  3069. * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
  3070. *
  3071. * @soc: soc handle
  3072. * @pdev: datapath physical dev handle
  3073. * @mac_id: mac number
  3074. *
  3075. * Return: None
  3076. */
  3077. static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
  3078. int mac_id)
  3079. {
  3080. }
  3081. /**
  3082. * dp_pdev_mem_reset() - Reset txrx pdev memory
  3083. * @pdev: dp pdev handle
  3084. *
  3085. * Return: None
  3086. */
  3087. static void dp_pdev_mem_reset(struct dp_pdev *pdev)
  3088. {
  3089. uint16_t len = 0;
  3090. uint8_t *dp_pdev_offset = (uint8_t *)pdev;
  3091. len = sizeof(struct dp_pdev) -
  3092. offsetof(struct dp_pdev, pdev_deinit) -
  3093. sizeof(pdev->pdev_deinit);
  3094. dp_pdev_offset = dp_pdev_offset +
  3095. offsetof(struct dp_pdev, pdev_deinit) +
  3096. sizeof(pdev->pdev_deinit);
  3097. qdf_mem_zero(dp_pdev_offset, len);
  3098. }
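/*
 * Illustrative sketch (not part of dp_main.c): dp_pdev_mem_reset() zeroes
 * everything in struct dp_pdev that lies after the pdev_deinit field, keeping
 * the fields up to and including that marker intact. The same offsetof-based
 * partial reset on an invented struct:
 */
#if 0
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in: fields before and including 'deinit_marker' survive
 * the reset, everything after it is zeroed. */
struct demo_pdev {
	int keep_a;         /* preserved */
	int deinit_marker;  /* preserved: plays the role of pdev_deinit */
	int wiped_b;        /* zeroed    */
	int wiped_c;        /* zeroed    */
};

static void demo_pdev_mem_reset(struct demo_pdev *p)
{
	size_t start = offsetof(struct demo_pdev, deinit_marker) +
		       sizeof(p->deinit_marker);
	size_t len   = sizeof(*p) - start;

	memset((char *)p + start, 0, len);
}

int main(void)
{
	struct demo_pdev p = { 1, 2, 3, 4 };

	demo_pdev_mem_reset(&p);
	printf("%d %d %d %d\n", p.keep_a, p.deinit_marker, p.wiped_b, p.wiped_c);
	/* prints: 1 2 0 0 */
	return 0;
}
#endif /* illustrative sketch */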
  3099. /**
  3100. * dp_pdev_deinit() - Deinit txrx pdev
  3101. * @txrx_pdev: Datapath PDEV handle
  3102. * @force: Force deinit
  3103. *
  3104. * Return: None
  3105. */
  3106. static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
  3107. {
  3108. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3109. struct dp_soc *soc = pdev->soc;
  3110. qdf_nbuf_t curr_nbuf, next_nbuf;
  3111. int mac_id;
  3112. /*
  3113. * Prevent double pdev deinitialization during radio detach
3114. * execution, i.e. in the absence of any vdev
  3115. */
  3116. if (pdev->pdev_deinit)
  3117. return;
  3118. pdev->pdev_deinit = 1;
  3119. dp_wdi_event_detach(pdev);
  3120. dp_tx_pdev_detach(pdev);
  3121. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3122. dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3123. TCL_DATA, pdev->pdev_id);
  3124. dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3125. WBM2SW_RELEASE, pdev->pdev_id);
  3126. }
  3127. dp_pktlogmod_exit(pdev);
  3128. dp_rx_pdev_detach(pdev);
  3129. dp_rx_pdev_mon_detach(pdev);
  3130. dp_neighbour_peers_detach(pdev);
  3131. qdf_spinlock_destroy(&pdev->tx_mutex);
  3132. qdf_spinlock_destroy(&pdev->vdev_list_lock);
  3133. dp_ipa_uc_detach(soc, pdev);
  3134. dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
  3135. /* Cleanup per PDEV REO rings if configured */
  3136. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3137. dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3138. REO_DST, pdev->pdev_id);
  3139. }
  3140. dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3141. dp_rxdma_ring_cleanup(soc, pdev);
  3142. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3143. dp_mon_ring_deinit(soc, pdev, mac_id);
  3144. dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3145. RXDMA_DST, 0);
  3146. }
  3147. curr_nbuf = pdev->invalid_peer_head_msdu;
  3148. while (curr_nbuf) {
  3149. next_nbuf = qdf_nbuf_next(curr_nbuf);
  3150. qdf_nbuf_free(curr_nbuf);
  3151. curr_nbuf = next_nbuf;
  3152. }
  3153. pdev->invalid_peer_head_msdu = NULL;
  3154. pdev->invalid_peer_tail_msdu = NULL;
  3155. dp_htt_ppdu_stats_detach(pdev);
  3156. qdf_nbuf_free(pdev->sojourn_buf);
  3157. dp_cal_client_detach(&pdev->cal_client_ctx);
  3158. soc->pdev_count--;
  3159. wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
  3160. qdf_mem_free(pdev->invalid_peer);
  3161. qdf_mem_free(pdev->dp_txrx_handle);
  3162. dp_pdev_mem_reset(pdev);
  3163. }
  3164. /**
  3165. * dp_pdev_deinit_wifi3() - Deinit txrx pdev
  3166. * @txrx_pdev: Datapath PDEV handle
  3167. * @force: Force deinit
  3168. *
  3169. * Return: None
  3170. */
  3171. static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
  3172. {
  3173. dp_pdev_deinit(txrx_pdev, force);
  3174. }
  3175. /*
  3176. * dp_pdev_detach() - Complete rest of pdev detach
  3177. * @txrx_pdev: Datapath PDEV handle
  3178. * @force: Force deinit
  3179. *
  3180. * Return: None
  3181. */
  3182. static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
  3183. {
  3184. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3185. struct dp_soc *soc = pdev->soc;
  3186. int mac_id;
  3187. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3188. dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3189. TCL_DATA, pdev->pdev_id);
  3190. dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3191. WBM2SW_RELEASE, pdev->pdev_id);
  3192. }
  3193. dp_mon_link_free(pdev);
  3194. /* Cleanup per PDEV REO rings if configured */
  3195. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3196. dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3197. REO_DST, pdev->pdev_id);
  3198. }
  3199. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3200. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3201. dp_mon_ring_cleanup(soc, pdev, mac_id);
  3202. dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3203. RXDMA_DST, 0);
  3204. }
  3205. soc->pdev_list[pdev->pdev_id] = NULL;
  3206. qdf_mem_free(pdev);
  3207. }
  3208. /*
  3209. * dp_pdev_detach_wifi3() - detach txrx pdev
  3210. * @txrx_pdev: Datapath PDEV handle
  3211. * @force: Force detach
  3212. *
  3213. * Return: None
  3214. */
  3215. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
  3216. {
  3217. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3218. struct dp_soc *soc = pdev->soc;
  3219. if (soc->dp_soc_reinit) {
  3220. dp_pdev_detach(txrx_pdev, force);
  3221. } else {
  3222. dp_pdev_deinit(txrx_pdev, force);
  3223. dp_pdev_detach(txrx_pdev, force);
  3224. }
  3225. }
  3226. /*
  3227. * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  3228. * @soc: DP SOC handle
  3229. */
  3230. static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  3231. {
  3232. struct reo_desc_list_node *desc;
  3233. struct dp_rx_tid *rx_tid;
  3234. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  3235. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  3236. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  3237. rx_tid = &desc->rx_tid;
  3238. qdf_mem_unmap_nbytes_single(soc->osdev,
  3239. rx_tid->hw_qdesc_paddr,
  3240. QDF_DMA_BIDIRECTIONAL,
  3241. rx_tid->hw_qdesc_alloc_size);
  3242. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  3243. qdf_mem_free(desc);
  3244. }
  3245. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  3246. qdf_list_destroy(&soc->reo_desc_freelist);
  3247. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  3248. }
  3249. /**
  3250. * dp_soc_mem_reset() - Reset Dp Soc memory
  3251. * @soc: DP handle
  3252. *
  3253. * Return: None
  3254. */
  3255. static void dp_soc_mem_reset(struct dp_soc *soc)
  3256. {
  3257. uint16_t len = 0;
  3258. uint8_t *dp_soc_offset = (uint8_t *)soc;
  3259. len = sizeof(struct dp_soc) -
  3260. offsetof(struct dp_soc, dp_soc_reinit) -
  3261. sizeof(soc->dp_soc_reinit);
  3262. dp_soc_offset = dp_soc_offset +
  3263. offsetof(struct dp_soc, dp_soc_reinit) +
  3264. sizeof(soc->dp_soc_reinit);
  3265. qdf_mem_zero(dp_soc_offset, len);
  3266. }
  3267. /**
  3268. * dp_soc_deinit() - Deinitialize txrx SOC
  3269. * @txrx_soc: Opaque DP SOC handle
  3270. *
  3271. * Return: None
  3272. */
  3273. static void dp_soc_deinit(void *txrx_soc)
  3274. {
  3275. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3276. int i;
  3277. qdf_atomic_set(&soc->cmn_init_done, 0);
  3278. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3279. if (soc->pdev_list[i])
  3280. dp_pdev_deinit((struct cdp_pdev *)
  3281. soc->pdev_list[i], 1);
  3282. }
  3283. qdf_flush_work(&soc->htt_stats.work);
  3284. qdf_disable_work(&soc->htt_stats.work);
  3285. /* Free pending htt stats messages */
  3286. qdf_nbuf_queue_free(&soc->htt_stats.msg);
  3287. dp_reo_cmdlist_destroy(soc);
  3288. dp_peer_find_detach(soc);
  3289. /* Free the ring memories */
  3290. /* Common rings */
  3291. dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3292. /* Tx data rings */
  3293. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3294. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3295. dp_srng_deinit(soc, &soc->tcl_data_ring[i],
  3296. TCL_DATA, i);
  3297. dp_srng_deinit(soc, &soc->tx_comp_ring[i],
  3298. WBM2SW_RELEASE, i);
  3299. }
  3300. }
  3301. /* TCL command and status rings */
  3302. dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3303. dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3304. /* Rx data rings */
  3305. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3306. soc->num_reo_dest_rings =
  3307. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3308. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3309. /* TODO: Get number of rings and ring sizes
  3310. * from wlan_cfg
  3311. */
  3312. dp_srng_deinit(soc, &soc->reo_dest_ring[i],
  3313. REO_DST, i);
  3314. }
  3315. }
  3316. /* REO reinjection ring */
  3317. dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3318. /* Rx release ring */
  3319. dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3320. /* Rx exception ring */
  3321. /* TODO: Better to store ring_type and ring_num in
  3322. * dp_srng during setup
  3323. */
  3324. dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3325. /* REO command and status rings */
  3326. dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3327. dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3328. dp_soc_wds_detach(soc);
  3329. qdf_spinlock_destroy(&soc->peer_ref_mutex);
  3330. qdf_spinlock_destroy(&soc->htt_stats.lock);
  3331. htt_soc_htc_dealloc(soc->htt_handle);
  3332. qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
  3333. dp_reo_cmdlist_destroy(soc);
  3334. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  3335. dp_reo_desc_freelist_destroy(soc);
  3336. qdf_spinlock_destroy(&soc->ast_lock);
  3337. dp_soc_mem_reset(soc);
  3338. }
  3339. /**
  3340. * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
  3341. * @txrx_soc: Opaque DP SOC handle
  3342. *
  3343. * Return: None
  3344. */
  3345. static void dp_soc_deinit_wifi3(void *txrx_soc)
  3346. {
  3347. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3348. soc->dp_soc_reinit = 1;
  3349. dp_soc_deinit(txrx_soc);
  3350. }
  3351. /*
  3352. * dp_soc_detach() - Detach rest of txrx SOC
  3353. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3354. *
  3355. * Return: None
  3356. */
  3357. static void dp_soc_detach(void *txrx_soc)
  3358. {
  3359. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3360. int i;
  3361. qdf_atomic_set(&soc->cmn_init_done, 0);
  3362. /* TBD: Call Tx and Rx cleanup functions to free buffers and
  3363. * SW descriptors
  3364. */
  3365. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3366. if (soc->pdev_list[i])
  3367. dp_pdev_detach((struct cdp_pdev *)
  3368. soc->pdev_list[i], 1);
  3369. }
  3370. /* Free the ring memories */
  3371. /* Common rings */
  3372. dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3373. dp_tx_soc_detach(soc);
  3374. /* Tx data rings */
  3375. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3376. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3377. dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
  3378. TCL_DATA, i);
  3379. dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
  3380. WBM2SW_RELEASE, i);
  3381. }
  3382. }
  3383. /* TCL command and status rings */
  3384. dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3385. dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3386. /* Rx data rings */
  3387. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3388. soc->num_reo_dest_rings =
  3389. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3390. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3391. /* TODO: Get number of rings and ring sizes
  3392. * from wlan_cfg
  3393. */
  3394. dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
  3395. REO_DST, i);
  3396. }
  3397. }
  3398. /* REO reinjection ring */
  3399. dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3400. /* Rx release ring */
  3401. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3402. /* Rx exception ring */
  3403. /* TODO: Better to store ring_type and ring_num in
  3404. * dp_srng during setup
  3405. */
  3406. dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3407. /* REO command and status rings */
  3408. dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3409. dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3410. dp_hw_link_desc_pool_cleanup(soc);
  3411. htt_soc_detach(soc->htt_handle);
  3412. soc->dp_soc_reinit = 0;
  3413. wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
  3414. qdf_mem_free(soc);
  3415. }
  3416. /*
  3417. * dp_soc_detach_wifi3() - Detach txrx SOC
  3418. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3419. *
  3420. * Return: None
  3421. */
  3422. static void dp_soc_detach_wifi3(void *txrx_soc)
  3423. {
  3424. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3425. if (soc->dp_soc_reinit) {
  3426. dp_soc_detach(txrx_soc);
  3427. } else {
  3428. dp_soc_deinit(txrx_soc);
  3429. dp_soc_detach(txrx_soc);
  3430. }
  3431. }
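/*
 * Illustrative sketch (not part of dp_main.c): dp_soc_detach_wifi3() runs
 * both teardown phases (deinit then detach) on a normal shutdown, but skips
 * the deinit phase when dp_soc_reinit indicates dp_soc_deinit_wifi3() already
 * ran. A minimal sketch of that gating with invented types:
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct demo_soc { bool reinit; };

static void demo_deinit(struct demo_soc *s) { (void)s; printf("deinit\n"); }
static void demo_detach(struct demo_soc *s) { (void)s; printf("detach\n"); }

static void demo_detach_wifi3(struct demo_soc *s)
{
	/* If a deinit already ran (reinit flagged), go straight to detach. */
	if (!s->reinit)
		demo_deinit(s);
	demo_detach(s);
}

int main(void)
{
	struct demo_soc normal = { false }, reinit = { true };

	demo_detach_wifi3(&normal);   /* deinit + detach */
	demo_detach_wifi3(&reinit);   /* detach only     */
	return 0;
}
#endif /* illustrative sketch */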
  3432. #if !defined(DISABLE_MON_CONFIG)
  3433. /**
  3434. * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
  3435. * @soc: soc handle
  3436. * @pdev: physical device handle
3437. * @mac_id: mac ring number within the pdev
3438. * @mac_for_pdev: mac id mapped for this pdev
  3439. *
  3440. * Return: non-zero for failure, zero for success
  3441. */
  3442. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3443. struct dp_pdev *pdev,
  3444. int mac_id,
  3445. int mac_for_pdev)
  3446. {
  3447. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3448. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  3449. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3450. pdev->rxdma_mon_buf_ring[mac_id]
  3451. .hal_srng,
  3452. RXDMA_MONITOR_BUF);
  3453. if (status != QDF_STATUS_SUCCESS) {
  3454. dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
  3455. return status;
  3456. }
  3457. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3458. pdev->rxdma_mon_dst_ring[mac_id]
  3459. .hal_srng,
  3460. RXDMA_MONITOR_DST);
  3461. if (status != QDF_STATUS_SUCCESS) {
  3462. dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
  3463. return status;
  3464. }
  3465. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3466. pdev->rxdma_mon_status_ring[mac_id]
  3467. .hal_srng,
  3468. RXDMA_MONITOR_STATUS);
  3469. if (status != QDF_STATUS_SUCCESS) {
  3470. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3471. return status;
  3472. }
  3473. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3474. pdev->rxdma_mon_desc_ring[mac_id]
  3475. .hal_srng,
  3476. RXDMA_MONITOR_DESC);
  3477. if (status != QDF_STATUS_SUCCESS) {
  3478. dp_err("Failed to send htt srng message for Rxdma mon desc ring");
  3479. return status;
  3480. }
  3481. } else {
  3482. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3483. pdev->rxdma_mon_status_ring[mac_id]
  3484. .hal_srng,
  3485. RXDMA_MONITOR_STATUS);
  3486. if (status != QDF_STATUS_SUCCESS) {
  3487. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3488. return status;
  3489. }
  3490. }
  3491. return status;
  3492. }
  3493. #else
  3494. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3495. struct dp_pdev *pdev,
  3496. int mac_id,
  3497. int mac_for_pdev)
  3498. {
  3499. return QDF_STATUS_SUCCESS;
  3500. }
  3501. #endif
  3502. /*
  3503. * dp_rxdma_ring_config() - configure the RX DMA rings
  3504. *
  3505. * This function is used to configure the MAC rings.
  3506. * On MCL host provides buffers in Host2FW ring
  3507. * FW refills (copies) buffers to the ring and updates
  3508. * ring_idx in register
  3509. *
  3510. * @soc: data path SoC handle
  3511. *
  3512. * Return: zero on success, non-zero on failure
  3513. */
  3514. #ifdef QCA_HOST2FW_RXBUF_RING
  3515. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3516. {
  3517. int i;
  3518. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3519. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3520. struct dp_pdev *pdev = soc->pdev_list[i];
  3521. if (pdev) {
  3522. int mac_id;
  3523. bool dbs_enable = 0;
  3524. int max_mac_rings =
  3525. wlan_cfg_get_num_mac_rings
  3526. (pdev->wlan_cfg_ctx);
  3527. htt_srng_setup(soc->htt_handle, 0,
  3528. pdev->rx_refill_buf_ring.hal_srng,
  3529. RXDMA_BUF);
  3530. if (pdev->rx_refill_buf_ring2.hal_srng)
  3531. htt_srng_setup(soc->htt_handle, 0,
  3532. pdev->rx_refill_buf_ring2.hal_srng,
  3533. RXDMA_BUF);
  3534. if (soc->cdp_soc.ol_ops->
  3535. is_hw_dbs_2x2_capable) {
  3536. dbs_enable = soc->cdp_soc.ol_ops->
  3537. is_hw_dbs_2x2_capable(soc->ctrl_psoc);
  3538. }
  3539. if (dbs_enable) {
  3540. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3541. QDF_TRACE_LEVEL_ERROR,
  3542. FL("DBS enabled max_mac_rings %d"),
  3543. max_mac_rings);
  3544. } else {
  3545. max_mac_rings = 1;
  3546. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3547. QDF_TRACE_LEVEL_ERROR,
  3548. FL("DBS disabled, max_mac_rings %d"),
  3549. max_mac_rings);
  3550. }
  3551. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3552. FL("pdev_id %d max_mac_rings %d"),
  3553. pdev->pdev_id, max_mac_rings);
  3554. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  3555. int mac_for_pdev = dp_get_mac_id_for_pdev(
  3556. mac_id, pdev->pdev_id);
  3557. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3558. QDF_TRACE_LEVEL_ERROR,
  3559. FL("mac_id %d"), mac_for_pdev);
  3560. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3561. pdev->rx_mac_buf_ring[mac_id]
  3562. .hal_srng,
  3563. RXDMA_BUF);
  3564. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3565. pdev->rxdma_err_dst_ring[mac_id]
  3566. .hal_srng,
  3567. RXDMA_DST);
  3568. /* Configure monitor mode rings */
  3569. status = dp_mon_htt_srng_setup(soc, pdev,
  3570. mac_id,
  3571. mac_for_pdev);
  3572. if (status != QDF_STATUS_SUCCESS) {
  3573. dp_err("Failed to send htt monitor messages to target");
  3574. return status;
  3575. }
  3576. }
  3577. }
  3578. }
  3579. /*
  3580. * Timer to reap rxdma status rings.
  3581. * Needed until we enable ppdu end interrupts
  3582. */
  3583. qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
  3584. dp_service_mon_rings, (void *)soc,
  3585. QDF_TIMER_TYPE_WAKE_APPS);
  3586. soc->reap_timer_init = 1;
  3587. return status;
  3588. }
  3589. #else
  3590. /* This is only for WIN */
  3591. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3592. {
  3593. int i;
  3594. int mac_id;
  3595. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3596. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3597. struct dp_pdev *pdev = soc->pdev_list[i];
  3598. if (pdev == NULL)
  3599. continue;
  3600. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3601. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
  3602. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3603. pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
  3604. #ifndef DISABLE_MON_CONFIG
  3605. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3606. pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
  3607. RXDMA_MONITOR_BUF);
  3608. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3609. pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
  3610. RXDMA_MONITOR_DST);
  3611. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3612. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  3613. RXDMA_MONITOR_STATUS);
  3614. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3615. pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
  3616. RXDMA_MONITOR_DESC);
  3617. #endif
  3618. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3619. pdev->rxdma_err_dst_ring[mac_id].hal_srng,
  3620. RXDMA_DST);
  3621. }
  3622. }
  3623. return status;
  3624. }
  3625. #endif
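/*
 * Note: the two dp_rxdma_ring_config() variants above are selected per
 * build. With QCA_HOST2FW_RXBUF_RING (MCL), the Host2FW MAC buffer rings,
 * error-destination rings and monitor rings are programmed per MAC ring
 * and a reap timer is initialized for the rxdma status rings; the WIN
 * variant programs the refill, monitor and error-destination SRNGs for
 * every RXDMA ring of each pdev.
 */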
  3626. /*
  3627. * dp_soc_attach_target_wifi3() - SOC initialization in the target
  3628. * @cdp_soc: Opaque Datapath SOC handle
  3629. *
  3630. * Return: zero on success, non-zero on failure
  3631. */
  3632. static QDF_STATUS
  3633. dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
  3634. {
  3635. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  3636. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3637. htt_soc_attach_target(soc->htt_handle);
  3638. status = dp_rxdma_ring_config(soc);
  3639. if (status != QDF_STATUS_SUCCESS) {
  3640. dp_err("Failed to send htt srng setup messages to target");
  3641. return status;
  3642. }
  3643. DP_STATS_INIT(soc);
  3644. /* initialize work queue for stats processing */
  3645. qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
  3646. return QDF_STATUS_SUCCESS;
  3647. }
  3648. /*
  3649. * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3650. * @cdp_soc: Datapath SOC handle
  3651. */
  3652. static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
  3653. {
  3654. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  3655. return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
  3656. }
  3657. /*
  3658. * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3659. * @cdp_soc: Datapath SOC handle
3660. * @config: nss config value
  3661. */
  3662. static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
  3663. {
  3664. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  3665. struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
  3666. wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
  3667. /*
3668. * TODO: mask this out based on the per-radio offload config
  3669. */
  3670. switch (config) {
  3671. case dp_nss_cfg_default:
  3672. break;
  3673. case dp_nss_cfg_dbdc:
  3674. case dp_nss_cfg_dbtc:
  3675. wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
  3676. wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
  3677. wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
  3678. wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
  3679. break;
  3680. default:
  3681. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3682. "Invalid offload config %d", config);
  3683. }
  3684. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3685. FL("nss-wifi<0> nss config is enabled"));
  3686. }
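/*
 * Illustrative sketch (assumption, not part of the driver): a control-path
 * caller enabling DBDC NSS offload might do
 *
 *   dp_soc_set_nss_cfg_wifi3(cdp_soc, dp_nss_cfg_dbdc);
 *   cfg = dp_soc_get_nss_cfg_wifi3(cdp_soc);
 *
 * which, per the switch above, also zeroes the host Tx descriptor pool
 * sizes, presumably because Tx is handled by the offload path in that
 * configuration.
 */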
  3687. /*
  3688. * dp_vdev_attach_wifi3() - attach txrx vdev
  3689. * @txrx_pdev: Datapath PDEV handle
  3690. * @vdev_mac_addr: MAC address of the virtual interface
  3691. * @vdev_id: VDEV Id
3692. * @op_mode: VDEV operating mode
  3693. *
  3694. * Return: DP VDEV handle on success, NULL on failure
  3695. */
  3696. static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
  3697. uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
  3698. {
  3699. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3700. struct dp_soc *soc = pdev->soc;
  3701. struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
  3702. if (!vdev) {
  3703. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3704. FL("DP VDEV memory allocation failed"));
  3705. goto fail0;
  3706. }
  3707. vdev->pdev = pdev;
  3708. vdev->vdev_id = vdev_id;
  3709. vdev->opmode = op_mode;
  3710. vdev->osdev = soc->osdev;
  3711. vdev->osif_rx = NULL;
  3712. vdev->osif_rsim_rx_decap = NULL;
  3713. vdev->osif_get_key = NULL;
  3714. vdev->osif_rx_mon = NULL;
  3715. vdev->osif_tx_free_ext = NULL;
  3716. vdev->osif_vdev = NULL;
  3717. vdev->delete.pending = 0;
  3718. vdev->safemode = 0;
  3719. vdev->drop_unenc = 1;
  3720. vdev->sec_type = cdp_sec_type_none;
  3721. #ifdef notyet
  3722. vdev->filters_num = 0;
  3723. #endif
  3724. qdf_mem_copy(
  3725. &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
  3726. /* TODO: Initialize default HTT meta data that will be used in
  3727. * TCL descriptors for packets transmitted from this VDEV
  3728. */
  3729. TAILQ_INIT(&vdev->peer_list);
  3730. if ((soc->intr_mode == DP_INTR_POLL) &&
  3731. wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
  3732. if ((pdev->vdev_count == 0) ||
  3733. (wlan_op_mode_monitor == vdev->opmode))
  3734. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  3735. }
  3736. if (wlan_op_mode_monitor == vdev->opmode) {
  3737. pdev->monitor_vdev = vdev;
  3738. return (struct cdp_vdev *)vdev;
  3739. }
  3740. vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  3741. vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  3742. vdev->dscp_tid_map_id = 0;
  3743. vdev->mcast_enhancement_en = 0;
  3744. vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
  3745. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3746. /* add this vdev into the pdev's list */
  3747. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  3748. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3749. pdev->vdev_count++;
  3750. dp_tx_vdev_attach(vdev);
  3751. if (pdev->vdev_count == 1)
  3752. dp_lro_hash_setup(soc, pdev);
  3753. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3754. "Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
  3755. DP_STATS_INIT(vdev);
  3756. if (wlan_op_mode_sta == vdev->opmode)
  3757. dp_peer_create_wifi3((struct cdp_vdev *)vdev,
  3758. vdev->mac_addr.raw,
  3759. NULL);
  3760. return (struct cdp_vdev *)vdev;
  3761. fail0:
  3762. return NULL;
  3763. }
  3764. /**
  3765. * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3766. * @vdev_handle: Datapath VDEV handle
  3767. * @osif_vdev: OSIF vdev handle
  3768. * @ctrl_vdev: UMAC vdev handle
  3769. * @txrx_ops: Tx and Rx operations
  3770. *
3771. * Return: void
  3772. */
  3773. static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
  3774. void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
  3775. struct ol_txrx_ops *txrx_ops)
  3776. {
  3777. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3778. vdev->osif_vdev = osif_vdev;
  3779. vdev->ctrl_vdev = ctrl_vdev;
  3780. vdev->osif_rx = txrx_ops->rx.rx;
  3781. vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
  3782. vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
  3783. vdev->osif_get_key = txrx_ops->get_key;
  3784. vdev->osif_rx_mon = txrx_ops->rx.mon;
  3785. vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
  3786. #ifdef notyet
  3787. #if ATH_SUPPORT_WAPI
  3788. vdev->osif_check_wai = txrx_ops->rx.wai_check;
  3789. #endif
  3790. #endif
  3791. #ifdef UMAC_SUPPORT_PROXY_ARP
  3792. vdev->osif_proxy_arp = txrx_ops->proxy_arp;
  3793. #endif
  3794. vdev->me_convert = txrx_ops->me_convert;
  3795. /* TODO: Enable the following once Tx code is integrated */
  3796. if (vdev->mesh_vdev)
  3797. txrx_ops->tx.tx = dp_tx_send_mesh;
  3798. else
  3799. txrx_ops->tx.tx = dp_tx_send;
  3800. txrx_ops->tx.tx_exception = dp_tx_send_exception;
  3801. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  3802. "DP Vdev Register success");
  3803. }
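/*
 * Illustrative sketch (assumption, not part of the driver): an OSIF layer
 * would typically attach a vdev and then register its callbacks. The
 * handler names below (my_osif_rx, my_osif_get_key, my_osif_vdev) are
 * hypothetical.
 *
 *   struct ol_txrx_ops ops = {0};
 *   struct cdp_vdev *vdev;
 *
 *   vdev = dp_vdev_attach_wifi3(txrx_pdev, mac, vdev_id, wlan_op_mode_sta);
 *   ops.rx.rx = my_osif_rx;
 *   ops.get_key = my_osif_get_key;
 *   dp_vdev_register_wifi3(vdev, my_osif_vdev, ctrl_vdev, &ops);
 *
 * After registration, ops.tx.tx and ops.tx.tx_exception point at the
 * dp_tx_send*() entry points set up in dp_vdev_register_wifi3() above.
 */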
  3804. /**
3805. * dp_vdev_flush_peers() - Forcibly flush peers of vdev
  3806. * @vdev: Datapath VDEV handle
  3807. * @unmap_only: Flag to indicate "only unmap"
  3808. *
  3809. * Return: void
  3810. */
  3811. static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
  3812. {
  3813. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3814. struct dp_pdev *pdev = vdev->pdev;
  3815. struct dp_soc *soc = pdev->soc;
  3816. struct dp_peer *peer;
  3817. uint16_t *peer_ids;
  3818. uint8_t i = 0, j = 0;
  3819. peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
  3820. if (!peer_ids) {
  3821. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3822. "DP alloc failure - unable to flush peers");
  3823. return;
  3824. }
  3825. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3826. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3827. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  3828. if (peer->peer_ids[i] != HTT_INVALID_PEER)
  3829. if (j < soc->max_peers)
  3830. peer_ids[j++] = peer->peer_ids[i];
  3831. }
  3832. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3833. for (i = 0; i < j ; i++) {
  3834. peer = dp_peer_find_by_id(soc, peer_ids[i]);
  3835. if (peer) {
3836. dp_info("peer: %pM is getting flushed",
  3837. peer->mac_addr.raw);
  3838. if (!unmap_only)
  3839. dp_peer_delete_wifi3(peer, 0);
  3840. /*
  3841. * we need to call dp_peer_unref_del_find_by_id()
  3842. * to remove additional ref count incremented
  3843. * by dp_peer_find_by_id() call.
  3844. *
  3845. * Hold the ref count while executing
  3846. * dp_peer_delete_wifi3() call.
  3847. *
  3848. */
  3849. dp_peer_unref_del_find_by_id(peer);
  3850. dp_rx_peer_unmap_handler(soc, peer_ids[i],
  3851. vdev->vdev_id,
  3852. peer->mac_addr.raw, 0);
  3853. }
  3854. }
  3855. qdf_mem_free(peer_ids);
  3856. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3857. FL("Flushed peers for vdev object %pK "), vdev);
  3858. }
  3859. /*
  3860. * dp_vdev_detach_wifi3() - Detach txrx vdev
3861. * @vdev_handle: Datapath VDEV handle
  3862. * @callback: Callback OL_IF on completion of detach
  3863. * @cb_context: Callback context
  3864. *
  3865. */
  3866. static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
  3867. ol_txrx_vdev_delete_cb callback, void *cb_context)
  3868. {
  3869. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3870. struct dp_pdev *pdev = vdev->pdev;
  3871. struct dp_soc *soc = pdev->soc;
  3872. struct dp_neighbour_peer *peer = NULL;
  3873. struct dp_neighbour_peer *temp_peer = NULL;
  3874. /* preconditions */
  3875. qdf_assert(vdev);
  3876. if (wlan_op_mode_monitor == vdev->opmode)
  3877. goto free_vdev;
  3878. if (wlan_op_mode_sta == vdev->opmode)
  3879. dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
  3880. /*
  3881. * If Target is hung, flush all peers before detaching vdev
  3882. * this will free all references held due to missing
  3883. * unmap commands from Target
  3884. */
  3885. if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
  3886. dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
  3887. /*
  3888. * Use peer_ref_mutex while accessing peer_list, in case
  3889. * a peer is in the process of being removed from the list.
  3890. */
  3891. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3892. /* check that the vdev has no peers allocated */
  3893. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  3894. /* debug print - will be removed later */
  3895. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  3896. FL("not deleting vdev object %pK (%pM)"
  3897. "until deletion finishes for all its peers"),
  3898. vdev, vdev->mac_addr.raw);
  3899. /* indicate that the vdev needs to be deleted */
  3900. vdev->delete.pending = 1;
  3901. vdev->delete.callback = callback;
  3902. vdev->delete.context = cb_context;
  3903. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3904. return;
  3905. }
  3906. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3907. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  3908. if (!soc->hw_nac_monitor_support) {
  3909. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  3910. neighbour_peer_list_elem) {
  3911. QDF_ASSERT(peer->vdev != vdev);
  3912. }
  3913. } else {
  3914. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  3915. neighbour_peer_list_elem, temp_peer) {
  3916. if (peer->vdev == vdev) {
  3917. TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
  3918. neighbour_peer_list_elem);
  3919. qdf_mem_free(peer);
  3920. }
  3921. }
  3922. }
  3923. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  3924. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3925. dp_tx_vdev_detach(vdev);
  3926. /* remove the vdev from its parent pdev's list */
  3927. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  3928. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3929. FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
  3930. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3931. free_vdev:
  3932. qdf_mem_free(vdev);
  3933. if (callback)
  3934. callback(cb_context);
  3935. }
  3936. /*
  3937. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3938. * @soc: datapath soc handle
3939. * @peer: datapath peer handle
  3940. *
  3941. * Delete the AST entries belonging to a peer
  3942. */
  3943. #ifdef FEATURE_AST
  3944. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  3945. struct dp_peer *peer)
  3946. {
  3947. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  3948. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  3949. dp_peer_del_ast(soc, ast_entry);
  3950. peer->self_ast_entry = NULL;
  3951. }
  3952. #else
  3953. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  3954. struct dp_peer *peer)
  3955. {
  3956. }
  3957. #endif
  3958. #if ATH_SUPPORT_WRAP
  3959. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  3960. uint8_t *peer_mac_addr)
  3961. {
  3962. struct dp_peer *peer;
  3963. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  3964. 0, vdev->vdev_id);
  3965. if (!peer)
  3966. return NULL;
  3967. if (peer->bss_peer)
  3968. return peer;
  3969. dp_peer_unref_delete(peer);
  3970. return NULL;
  3971. }
  3972. #else
  3973. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  3974. uint8_t *peer_mac_addr)
  3975. {
  3976. struct dp_peer *peer;
  3977. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  3978. 0, vdev->vdev_id);
  3979. if (!peer)
  3980. return NULL;
  3981. if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
  3982. return peer;
  3983. dp_peer_unref_delete(peer);
  3984. return NULL;
  3985. }
  3986. #endif
  3987. #ifdef FEATURE_AST
  3988. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  3989. uint8_t *peer_mac_addr)
  3990. {
  3991. struct dp_ast_entry *ast_entry;
  3992. qdf_spin_lock_bh(&soc->ast_lock);
  3993. ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
  3994. if (ast_entry && ast_entry->next_hop &&
  3995. !ast_entry->delete_in_progress)
  3996. dp_peer_del_ast(soc, ast_entry);
  3997. qdf_spin_unlock_bh(&soc->ast_lock);
  3998. }
  3999. #endif
  4000. /*
  4001. * dp_peer_create_wifi3() - attach txrx peer
4002. * @vdev_handle: Datapath VDEV handle
4003. * @peer_mac_addr: Peer MAC address
4004. * @ctrl_peer: Control path (objmgr) peer handle
4005. * Return: DP peer handle on success, NULL on failure
  4006. */
  4007. static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
  4008. uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
  4009. {
  4010. struct dp_peer *peer;
  4011. int i;
  4012. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4013. struct dp_pdev *pdev;
  4014. struct dp_soc *soc;
  4015. enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
  4016. /* preconditions */
  4017. qdf_assert(vdev);
  4018. qdf_assert(peer_mac_addr);
  4019. pdev = vdev->pdev;
  4020. soc = pdev->soc;
  4021. /*
  4022. * If a peer entry with given MAC address already exists,
  4023. * reuse the peer and reset the state of peer.
  4024. */
  4025. peer = dp_peer_can_reuse(vdev, peer_mac_addr);
  4026. if (peer) {
  4027. qdf_atomic_init(&peer->is_default_route_set);
  4028. dp_peer_cleanup(vdev, peer);
  4029. qdf_spin_lock_bh(&soc->ast_lock);
  4030. dp_peer_delete_ast_entries(soc, peer);
  4031. peer->delete_in_progress = false;
  4032. qdf_spin_unlock_bh(&soc->ast_lock);
  4033. if ((vdev->opmode == wlan_op_mode_sta) &&
  4034. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  4035. DP_MAC_ADDR_LEN)) {
  4036. ast_type = CDP_TXRX_AST_TYPE_SELF;
  4037. }
  4038. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  4039. /*
  4040. * Control path maintains a node count which is incremented
4041. * for every new peer create command. Since a new peer is not being
4042. * created and the earlier reference is reused here, a
4043. * peer_unref_delete event is sent to the control path to
4044. * adjust the count back.
  4045. */
  4046. if (soc->cdp_soc.ol_ops->peer_unref_delete) {
  4047. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
  4048. peer->mac_addr.raw, vdev->mac_addr.raw,
  4049. vdev->opmode, peer->ctrl_peer, ctrl_peer);
  4050. }
  4051. peer->ctrl_peer = ctrl_peer;
  4052. dp_local_peer_id_alloc(pdev, peer);
  4053. DP_STATS_INIT(peer);
  4054. return (void *)peer;
  4055. } else {
  4056. /*
  4057. * When a STA roams from RPTR AP to ROOT AP and vice versa, we
  4058. * need to remove the AST entry which was earlier added as a WDS
  4059. * entry.
4060. * If an AST entry exists, but no peer entry exists with the given
4061. * MAC address, we can deduce it to be a WDS entry.
  4062. */
  4063. dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
  4064. }
  4065. #ifdef notyet
  4066. peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
  4067. soc->mempool_ol_ath_peer);
  4068. #else
  4069. peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
  4070. #endif
  4071. if (!peer)
  4072. return NULL; /* failure */
  4073. qdf_mem_zero(peer, sizeof(struct dp_peer));
  4074. TAILQ_INIT(&peer->ast_entry_list);
  4075. /* store provided params */
  4076. peer->vdev = vdev;
  4077. peer->ctrl_peer = ctrl_peer;
  4078. if ((vdev->opmode == wlan_op_mode_sta) &&
  4079. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  4080. DP_MAC_ADDR_LEN)) {
  4081. ast_type = CDP_TXRX_AST_TYPE_SELF;
  4082. }
  4083. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  4084. qdf_spinlock_create(&peer->peer_info_lock);
  4085. qdf_mem_copy(
  4086. &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4087. /* TODO: See if rx_opt_proc is really required */
  4088. peer->rx_opt_proc = soc->rx_opt_proc;
  4089. /* initialize the peer_id */
  4090. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  4091. peer->peer_ids[i] = HTT_INVALID_PEER;
  4092. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4093. qdf_atomic_init(&peer->ref_cnt);
  4094. /* keep one reference for attach */
  4095. qdf_atomic_inc(&peer->ref_cnt);
  4096. /* add this peer into the vdev's list */
  4097. if (wlan_op_mode_sta == vdev->opmode)
  4098. TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
  4099. else
  4100. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  4101. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4102. /* TODO: See if hash based search is required */
  4103. dp_peer_find_hash_add(soc, peer);
  4104. /* Initialize the peer state */
  4105. peer->state = OL_TXRX_PEER_STATE_DISC;
  4106. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4107. "vdev %pK created peer %pK (%pM) ref_cnt: %d",
  4108. vdev, peer, peer->mac_addr.raw,
  4109. qdf_atomic_read(&peer->ref_cnt));
  4110. /*
4111. * Set bss_peer if the peer's MAC address matches the vdev's MAC address
  4112. */
  4113. if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
  4114. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4115. "vdev bss_peer!!!!");
  4116. peer->bss_peer = 1;
  4117. vdev->vap_bss_peer = peer;
  4118. }
  4119. for (i = 0; i < DP_MAX_TIDS; i++)
  4120. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  4121. dp_local_peer_id_alloc(pdev, peer);
  4122. DP_STATS_INIT(peer);
  4123. return (void *)peer;
  4124. }
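/*
 * Illustrative sketch (assumption, not part of the driver): peer bring-up
 * is expected to pair peer creation with peer setup, e.g.
 *
 *   void *peer = dp_peer_create_wifi3(vdev_handle, mac, ctrl_peer);
 *   if (peer)
 *       dp_peer_setup_wifi3(vdev_handle, peer);
 *
 * so that the REO destination/hash selection, default-route programming and
 * Rx initialization in dp_peer_setup_wifi3() follow the creation above.
 */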
  4125. /*
  4126. * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
  4127. * @vdev: Datapath VDEV handle
  4128. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4129. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4130. *
  4131. * Return: None
  4132. */
  4133. static
  4134. void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
  4135. enum cdp_host_reo_dest_ring *reo_dest,
  4136. bool *hash_based)
  4137. {
  4138. struct dp_soc *soc;
  4139. struct dp_pdev *pdev;
  4140. pdev = vdev->pdev;
  4141. soc = pdev->soc;
  4142. /*
  4143. * hash based steering is disabled for Radios which are offloaded
  4144. * to NSS
  4145. */
  4146. if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
  4147. *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
  4148. /*
  4149. * Below line of code will ensure the proper reo_dest ring is chosen
  4150. * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
  4151. */
  4152. *reo_dest = pdev->reo_dest;
  4153. }
  4154. #ifdef IPA_OFFLOAD
  4155. /*
  4156. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4157. * @vdev: Datapath VDEV handle
  4158. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4159. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4160. *
4161. * If IPA is enabled in the ini, disable hash-based steering for SAP
4162. * mode and use reo_dest_ring_4 for RX. Use config values for other modes.
  4163. * Return: None
  4164. */
  4165. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4166. enum cdp_host_reo_dest_ring *reo_dest,
  4167. bool *hash_based)
  4168. {
  4169. struct dp_soc *soc;
  4170. struct dp_pdev *pdev;
  4171. pdev = vdev->pdev;
  4172. soc = pdev->soc;
  4173. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4174. /*
  4175. * If IPA is enabled, disable hash-based flow steering and set
  4176. * reo_dest_ring_4 as the REO ring to receive packets on.
  4177. * IPA is configured to reap reo_dest_ring_4.
  4178. *
  4179. * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
4180. * enum values are from 1 - 4.
  4181. * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
  4182. */
  4183. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  4184. if (vdev->opmode == wlan_op_mode_ap) {
  4185. *reo_dest = IPA_REO_DEST_RING_IDX + 1;
  4186. *hash_based = 0;
  4187. }
  4188. }
  4189. }
  4190. #else
  4191. /*
  4192. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4193. * @vdev: Datapath VDEV handle
  4194. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4195. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4196. *
  4197. * Use system config values for hash based steering.
  4198. * Return: None
  4199. */
  4200. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4201. enum cdp_host_reo_dest_ring *reo_dest,
  4202. bool *hash_based)
  4203. {
  4204. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4205. }
  4206. #endif /* IPA_OFFLOAD */
  4207. /*
  4208. * dp_peer_setup_wifi3() - initialize the peer
  4209. * @vdev_hdl: virtual device object
  4210. * @peer: Peer object
  4211. *
  4212. * Return: void
  4213. */
  4214. static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  4215. {
  4216. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  4217. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  4218. struct dp_pdev *pdev;
  4219. struct dp_soc *soc;
  4220. bool hash_based = 0;
  4221. enum cdp_host_reo_dest_ring reo_dest;
  4222. /* preconditions */
  4223. qdf_assert(vdev);
  4224. qdf_assert(peer);
  4225. pdev = vdev->pdev;
  4226. soc = pdev->soc;
  4227. peer->last_assoc_rcvd = 0;
  4228. peer->last_disassoc_rcvd = 0;
  4229. peer->last_deauth_rcvd = 0;
  4230. dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
  4231. dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
  4232. pdev->pdev_id, vdev->vdev_id,
  4233. vdev->opmode, hash_based, reo_dest);
  4234. /*
  4235. * There are corner cases where the AD1 = AD2 = "VAPs address"
4236. * i.e. both the devices have the same MAC address. In these
4237. * cases we want such pkts to be processed by the NULL Q handler,
4238. * which is the REO2TCL ring. For this reason we should
4239. * not set up reo_queues and the default route for the bss_peer.
  4240. */
  4241. if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
  4242. return;
  4243. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  4244. /* TODO: Check the destination ring number to be passed to FW */
  4245. soc->cdp_soc.ol_ops->peer_set_default_routing(
  4246. pdev->ctrl_pdev, peer->mac_addr.raw,
  4247. peer->vdev->vdev_id, hash_based, reo_dest);
  4248. }
  4249. qdf_atomic_set(&peer->is_default_route_set, 1);
  4250. dp_peer_rx_init(pdev, peer);
  4251. return;
  4252. }
  4253. /*
  4254. * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
  4255. * @vdev_handle: virtual device object
  4256. * @htt_pkt_type: type of pkt
  4257. *
  4258. * Return: void
  4259. */
  4260. static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
  4261. enum htt_cmn_pkt_type val)
  4262. {
  4263. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4264. vdev->tx_encap_type = val;
  4265. }
  4266. /*
  4267. * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
  4268. * @vdev_handle: virtual device object
  4269. * @htt_pkt_type: type of pkt
  4270. *
  4271. * Return: void
  4272. */
  4273. static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
  4274. enum htt_cmn_pkt_type val)
  4275. {
  4276. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4277. vdev->rx_decap_type = val;
  4278. }
  4279. /*
  4280. * dp_set_ba_aging_timeout() - set ba aging timeout per AC
  4281. * @txrx_soc: cdp soc handle
  4282. * @ac: Access category
  4283. * @value: timeout value in millisec
  4284. *
  4285. * Return: void
  4286. */
  4287. static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  4288. uint8_t ac, uint32_t value)
  4289. {
  4290. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  4291. hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
  4292. }
  4293. /*
  4294. * dp_get_ba_aging_timeout() - get ba aging timeout per AC
  4295. * @txrx_soc: cdp soc handle
  4296. * @ac: access category
  4297. * @value: timeout value in millisec
  4298. *
  4299. * Return: void
  4300. */
  4301. static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  4302. uint8_t ac, uint32_t *value)
  4303. {
  4304. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  4305. hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
  4306. }
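/*
 * Illustrative sketch (assumption, not part of the driver): tuning the BA
 * aging timeout for one access category could look like
 *
 *   uint32_t ba_timeout_ms;
 *
 *   dp_set_ba_aging_timeout(txrx_soc, ac, 100);
 *   dp_get_ba_aging_timeout(txrx_soc, ac, &ba_timeout_ms);
 *
 * where ac is the access-category index expected by the HAL and the value
 * is in milliseconds, as the kernel-doc above states.
 */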
  4307. /*
  4308. * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
  4309. * @pdev_handle: physical device object
  4310. * @val: reo destination ring index (1 - 4)
  4311. *
  4312. * Return: void
  4313. */
  4314. static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
  4315. enum cdp_host_reo_dest_ring val)
  4316. {
  4317. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4318. if (pdev)
  4319. pdev->reo_dest = val;
  4320. }
  4321. /*
  4322. * dp_get_pdev_reo_dest() - get the reo destination for this pdev
  4323. * @pdev_handle: physical device object
  4324. *
  4325. * Return: reo destination ring index
  4326. */
  4327. static enum cdp_host_reo_dest_ring
  4328. dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
  4329. {
  4330. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4331. if (pdev)
  4332. return pdev->reo_dest;
  4333. else
  4334. return cdp_host_reo_dest_ring_unknown;
  4335. }
  4336. /*
  4337. * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
  4338. * @pdev_handle: device object
  4339. * @val: value to be set
  4340. *
4341. * Return: 0 on success
  4342. */
  4343. static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  4344. uint32_t val)
  4345. {
  4346. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4347. /* Enable/Disable smart mesh filtering. This flag will be checked
  4348. * during rx processing to check if packets are from NAC clients.
  4349. */
  4350. pdev->filter_neighbour_peers = val;
  4351. return 0;
  4352. }
  4353. /*
  4354. * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
  4355. * address for smart mesh filtering
  4356. * @vdev_handle: virtual device object
  4357. * @cmd: Add/Del command
  4358. * @macaddr: nac client mac address
  4359. *
4360. * Return: 1 on success, 0 on failure
  4361. */
  4362. static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
  4363. uint32_t cmd, uint8_t *macaddr)
  4364. {
  4365. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4366. struct dp_pdev *pdev = vdev->pdev;
  4367. struct dp_neighbour_peer *peer = NULL;
  4368. if (!macaddr)
  4369. goto fail0;
  4370. /* Store address of NAC (neighbour peer) which will be checked
  4371. * against TA of received packets.
  4372. */
  4373. if (cmd == DP_NAC_PARAM_ADD) {
  4374. peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
  4375. sizeof(*peer));
  4376. if (!peer) {
  4377. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4378. FL("DP neighbour peer node memory allocation failed"));
  4379. goto fail0;
  4380. }
  4381. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  4382. macaddr, DP_MAC_ADDR_LEN);
  4383. peer->vdev = vdev;
  4384. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4385. /* add this neighbour peer into the list */
  4386. TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
  4387. neighbour_peer_list_elem);
  4388. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4389. /* first neighbour */
  4390. if (!pdev->neighbour_peers_added) {
  4391. pdev->neighbour_peers_added = true;
  4392. dp_ppdu_ring_cfg(pdev);
  4393. }
  4394. return 1;
  4395. } else if (cmd == DP_NAC_PARAM_DEL) {
  4396. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4397. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  4398. neighbour_peer_list_elem) {
  4399. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  4400. macaddr, DP_MAC_ADDR_LEN)) {
  4401. /* delete this peer from the list */
  4402. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  4403. peer, neighbour_peer_list_elem);
  4404. qdf_mem_free(peer);
  4405. break;
  4406. }
  4407. }
  4408. /* last neighbour deleted */
  4409. if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
  4410. pdev->neighbour_peers_added = false;
  4411. dp_ppdu_ring_cfg(pdev);
  4412. }
  4413. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4414. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  4415. !pdev->enhanced_stats_en)
  4416. dp_ppdu_ring_reset(pdev);
  4417. return 1;
  4418. }
  4419. fail0:
  4420. return 0;
  4421. }
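/*
 * Illustrative sketch (assumption, not part of the driver): smart-mesh NAC
 * monitoring is expected to enable the pdev filter and then add/remove
 * client MAC addresses, e.g.
 *
 *   dp_set_filter_neighbour_peers(pdev_handle, 1);
 *   dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD, mac);
 *   ...
 *   dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_DEL, mac);
 *
 * The first neighbour added triggers dp_ppdu_ring_cfg() and removing the
 * last one may trigger dp_ppdu_ring_reset(), as handled above.
 */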
  4422. /*
  4423. * dp_get_sec_type() - Get the security type
  4424. * @peer: Datapath peer handle
  4425. * @sec_idx: Security id (mcast, ucast)
  4426. *
  4427. * return sec_type: Security type
  4428. */
  4429. static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
  4430. {
  4431. struct dp_peer *dpeer = (struct dp_peer *)peer;
  4432. return dpeer->security[sec_idx].sec_type;
  4433. }
  4434. /*
  4435. * dp_peer_authorize() - authorize txrx peer
  4436. * @peer_handle: Datapath peer handle
4437. * @authorize: non-zero to authorize the peer, zero to deauthorize
  4438. *
  4439. */
  4440. static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
  4441. {
  4442. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4443. struct dp_soc *soc;
  4444. if (peer != NULL) {
  4445. soc = peer->vdev->pdev->soc;
  4446. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4447. peer->authorize = authorize ? 1 : 0;
  4448. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4449. }
  4450. }
  4451. static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
  4452. struct dp_pdev *pdev,
  4453. struct dp_peer *peer,
  4454. uint32_t vdev_id)
  4455. {
  4456. struct dp_vdev *vdev = NULL;
  4457. struct dp_peer *bss_peer = NULL;
  4458. uint8_t *m_addr = NULL;
  4459. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4460. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  4461. if (vdev->vdev_id == vdev_id)
  4462. break;
  4463. }
  4464. if (!vdev) {
  4465. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4466. "vdev is NULL");
  4467. } else {
  4468. if (vdev->vap_bss_peer == peer)
  4469. vdev->vap_bss_peer = NULL;
  4470. m_addr = peer->mac_addr.raw;
  4471. if (soc->cdp_soc.ol_ops->peer_unref_delete)
  4472. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
  4473. m_addr, vdev->mac_addr.raw, vdev->opmode,
  4474. peer->ctrl_peer, NULL);
  4475. if (vdev && vdev->vap_bss_peer) {
  4476. bss_peer = vdev->vap_bss_peer;
  4477. DP_UPDATE_STATS(vdev, peer);
  4478. }
  4479. }
  4480. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4481. /*
4482. * Peer AST list has to be empty here
  4483. */
  4484. DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
  4485. qdf_mem_free(peer);
  4486. }
  4487. /**
  4488. * dp_delete_pending_vdev() - check and process vdev delete
  4489. * @pdev: DP specific pdev pointer
  4490. * @vdev: DP specific vdev pointer
  4491. * @vdev_id: vdev id corresponding to vdev
  4492. *
  4493. * This API does following:
4494. * 1) It releases tx flow pool buffers as the vdev is
  4495. * going down and no peers are associated.
  4496. * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
  4497. */
  4498. static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
  4499. uint8_t vdev_id)
  4500. {
  4501. ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
  4502. void *vdev_delete_context = NULL;
  4503. vdev_delete_cb = vdev->delete.callback;
  4504. vdev_delete_context = vdev->delete.context;
  4505. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4506. FL("deleting vdev object %pK (%pM)- its last peer is done"),
  4507. vdev, vdev->mac_addr.raw);
  4508. /* all peers are gone, go ahead and delete it */
  4509. dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
  4510. FLOW_TYPE_VDEV, vdev_id);
  4511. dp_tx_vdev_detach(vdev);
  4512. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4513. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  4514. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4515. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4516. FL("deleting vdev object %pK (%pM)"),
  4517. vdev, vdev->mac_addr.raw);
  4518. qdf_mem_free(vdev);
  4519. vdev = NULL;
  4520. if (vdev_delete_cb)
  4521. vdev_delete_cb(vdev_delete_context);
  4522. }
  4523. /*
  4524. * dp_peer_unref_delete() - unref and delete peer
  4525. * @peer_handle: Datapath peer handle
  4526. *
  4527. */
  4528. void dp_peer_unref_delete(void *peer_handle)
  4529. {
  4530. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4531. struct dp_vdev *vdev = peer->vdev;
  4532. struct dp_pdev *pdev = vdev->pdev;
  4533. struct dp_soc *soc = pdev->soc;
  4534. struct dp_peer *tmppeer;
  4535. int found = 0;
  4536. uint16_t peer_id;
  4537. uint16_t vdev_id;
  4538. bool delete_vdev;
  4539. /*
  4540. * Hold the lock all the way from checking if the peer ref count
  4541. * is zero until the peer references are removed from the hash
  4542. * table and vdev list (if the peer ref count is zero).
  4543. * This protects against a new HL tx operation starting to use the
  4544. * peer object just after this function concludes it's done being used.
  4545. * Furthermore, the lock needs to be held while checking whether the
  4546. * vdev's list of peers is empty, to make sure that list is not modified
  4547. * concurrently with the empty check.
  4548. */
  4549. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4550. if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
  4551. peer_id = peer->peer_ids[0];
  4552. vdev_id = vdev->vdev_id;
  4553. /*
  4554. * Make sure that the reference to the peer in
  4555. * peer object map is removed
  4556. */
  4557. if (peer_id != HTT_INVALID_PEER)
  4558. soc->peer_id_to_obj_map[peer_id] = NULL;
  4559. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4560. "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
  4561. /* remove the reference to the peer from the hash table */
  4562. dp_peer_find_hash_remove(soc, peer);
  4563. qdf_spin_lock_bh(&soc->ast_lock);
  4564. if (peer->self_ast_entry) {
  4565. dp_peer_del_ast(soc, peer->self_ast_entry);
  4566. peer->self_ast_entry = NULL;
  4567. }
  4568. qdf_spin_unlock_bh(&soc->ast_lock);
  4569. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  4570. if (tmppeer == peer) {
  4571. found = 1;
  4572. break;
  4573. }
  4574. }
  4575. if (found) {
  4576. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  4577. peer_list_elem);
  4578. } else {
  4579. /*Ignoring the remove operation as peer not found*/
  4580. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4581. "peer:%pK not found in vdev:%pK peerlist:%pK",
  4582. peer, vdev, &peer->vdev->peer_list);
  4583. }
  4584. /* cleanup the peer data */
  4585. dp_peer_cleanup(vdev, peer);
  4586. /* check whether the parent vdev has no peers left */
  4587. if (TAILQ_EMPTY(&vdev->peer_list)) {
  4588. /*
  4589. * capture vdev delete pending flag's status
  4590. * while holding peer_ref_mutex lock
  4591. */
  4592. delete_vdev = vdev->delete.pending;
  4593. /*
  4594. * Now that there are no references to the peer, we can
  4595. * release the peer reference lock.
  4596. */
  4597. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4598. /*
  4599. * Check if the parent vdev was waiting for its peers
  4600. * to be deleted, in order for it to be deleted too.
  4601. */
  4602. if (delete_vdev)
  4603. dp_delete_pending_vdev(pdev, vdev, vdev_id);
  4604. } else {
  4605. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4606. }
  4607. dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
  4608. } else {
  4609. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4610. }
  4611. }
  4612. /*
4613. * dp_peer_delete_wifi3() - Delete txrx peer
  4614. * @peer_handle: Datapath peer handle
  4615. * @bitmap: bitmap indicating special handling of request.
  4616. *
  4617. */
  4618. static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
  4619. {
  4620. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4621. /* redirect the peer's rx delivery function to point to a
  4622. * discard func
  4623. */
  4624. peer->rx_opt_proc = dp_rx_discard;
  4625. /* Do not make ctrl_peer to NULL for connected sta peers.
  4626. * We need ctrl_peer to release the reference during dp
  4627. * peer free. This reference was held for
  4628. * obj_mgr peer during the creation of dp peer.
  4629. */
  4630. if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
  4631. !peer->bss_peer))
  4632. peer->ctrl_peer = NULL;
  4633. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4634. FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);
  4635. dp_local_peer_id_free(peer->vdev->pdev, peer);
  4636. qdf_spinlock_destroy(&peer->peer_info_lock);
  4637. /*
  4638. * Remove the reference added during peer_attach.
  4639. * The peer will still be left allocated until the
  4640. * PEER_UNMAP message arrives to remove the other
  4641. * reference, added by the PEER_MAP message.
  4642. */
  4643. dp_peer_unref_delete(peer_handle);
  4644. }
  4645. /*
4646. * dp_get_vdev_mac_addr_wifi3() - get vdev MAC address
4647. * @pvdev: Datapath VDEV handle
4648. * Return: pointer to the vdev MAC address
  4649. */
  4650. static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
  4651. {
  4652. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  4653. return vdev->mac_addr.raw;
  4654. }
  4655. /*
4656. * dp_vdev_set_wds() - Enable/Disable WDS for the vdev
4657. * @vdev_handle: DP VDEV handle
4658. * @val: value of wds_enabled flag
4659. *
4660. * Return: 0 on success
  4661. */
  4662. static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
  4663. {
  4664. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4665. vdev->wds_enabled = val;
  4666. return 0;
  4667. }
  4668. /*
4669. * dp_get_vdev_from_vdev_id_wifi3() - get vdev from vdev_id
4670. * @dev: Datapath PDEV handle
4671. * @vdev_id: vdev id
  4672. */
  4673. static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
  4674. uint8_t vdev_id)
  4675. {
  4676. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  4677. struct dp_vdev *vdev = NULL;
  4678. if (qdf_unlikely(!pdev))
  4679. return NULL;
  4680. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4681. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  4682. if (vdev->vdev_id == vdev_id)
  4683. break;
  4684. }
  4685. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4686. return (struct cdp_vdev *)vdev;
  4687. }
  4688. /*
  4689. * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
  4690. * @dev: PDEV handle
  4691. *
  4692. * Return: VDEV handle of monitor mode
  4693. */
  4694. static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
  4695. {
  4696. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  4697. if (qdf_unlikely(!pdev))
  4698. return NULL;
  4699. return (struct cdp_vdev *)pdev->monitor_vdev;
  4700. }
  4701. static int dp_get_opmode(struct cdp_vdev *vdev_handle)
  4702. {
  4703. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4704. return vdev->opmode;
  4705. }
  4706. static
  4707. void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
  4708. ol_txrx_rx_fp *stack_fn_p,
  4709. ol_osif_vdev_handle *osif_vdev_p)
  4710. {
  4711. struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
  4712. qdf_assert(vdev);
  4713. *stack_fn_p = vdev->osif_rx_stack;
  4714. *osif_vdev_p = vdev->osif_vdev;
  4715. }
  4716. static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
  4717. {
  4718. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  4719. struct dp_pdev *pdev = vdev->pdev;
  4720. return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
  4721. }
  4722. /**
  4723. * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
  4724. * ring based on target
  4725. * @soc: soc handle
4726. * @mac_for_pdev: mac id mapped for the pdev
  4727. * @pdev: physical device handle
  4728. * @ring_num: mac id
  4729. * @htt_tlv_filter: tlv filter
  4730. *
  4731. * Return: zero on success, non-zero on failure
  4732. */
  4733. static inline
  4734. QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
  4735. struct dp_pdev *pdev, uint8_t ring_num,
  4736. struct htt_rx_ring_tlv_filter htt_tlv_filter)
  4737. {
  4738. QDF_STATUS status;
  4739. if (soc->wlan_cfg_ctx->rxdma1_enable)
  4740. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4741. pdev->rxdma_mon_buf_ring[ring_num]
  4742. .hal_srng,
  4743. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
  4744. &htt_tlv_filter);
  4745. else
  4746. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4747. pdev->rx_mac_buf_ring[ring_num]
  4748. .hal_srng,
  4749. RXDMA_BUF, RX_BUFFER_SIZE,
  4750. &htt_tlv_filter);
  4751. return status;
  4752. }
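/*
 * Note: dp_monitor_mode_ring_config() above picks the ring per target
 * capability: when rxdma1_enable is set, the dedicated RXDMA_MONITOR_BUF
 * ring is programmed with the TLV filter; otherwise the per-MAC
 * rx_mac_buf_ring (RXDMA_BUF) is used instead.
 */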
  4753. /**
  4754. * dp_reset_monitor_mode() - Disable monitor mode
  4755. * @pdev_handle: Datapath PDEV handle
  4756. *
  4757. * Return: 0 on success, not 0 on failure
  4758. */
  4759. static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
  4760. {
  4761. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4762. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4763. struct dp_soc *soc = pdev->soc;
  4764. uint8_t pdev_id;
  4765. int mac_id;
  4766. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4767. pdev_id = pdev->pdev_id;
  4768. soc = pdev->soc;
  4769. qdf_spin_lock_bh(&pdev->mon_lock);
  4770. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4771. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4772. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4773. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4774. pdev, mac_id,
  4775. htt_tlv_filter);
  4776. if (status != QDF_STATUS_SUCCESS) {
  4777. dp_err("Failed to send tlv filter for monitor mode rings");
  4778. return status;
  4779. }
  4780. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4781. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4782. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
  4783. &htt_tlv_filter);
  4784. }
  4785. pdev->monitor_vdev = NULL;
  4786. pdev->mcopy_mode = 0;
  4787. pdev->monitor_configured = false;
  4788. qdf_spin_unlock_bh(&pdev->mon_lock);
  4789. return QDF_STATUS_SUCCESS;
  4790. }
  4791. /**
  4792. * dp_set_nac() - set peer_nac
  4793. * @peer_handle: Datapath PEER handle
  4794. *
  4795. * Return: void
  4796. */
  4797. static void dp_set_nac(struct cdp_peer *peer_handle)
  4798. {
  4799. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4800. peer->nac = 1;
  4801. }
  4802. /**
  4803. * dp_get_tx_pending() - read pending tx
  4804. * @pdev_handle: Datapath PDEV handle
  4805. *
  4806. * Return: outstanding tx
  4807. */
  4808. static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
  4809. {
  4810. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4811. return qdf_atomic_read(&pdev->num_tx_outstanding);
  4812. }
  4813. /**
  4814. * dp_get_peer_mac_from_peer_id() - get peer mac
  4815. * @pdev_handle: Datapath PDEV handle
  4816. * @peer_id: Peer ID
  4817. * @peer_mac: MAC addr of PEER
  4818. *
  4819. * Return: void
  4820. */
  4821. static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
  4822. uint32_t peer_id, uint8_t *peer_mac)
  4823. {
  4824. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4825. struct dp_peer *peer;
  4826. if (pdev && peer_mac) {
  4827. peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
  4828. if (peer) {
  4829. qdf_mem_copy(peer_mac, peer->mac_addr.raw,
  4830. DP_MAC_ADDR_LEN);
  4831. dp_peer_unref_del_find_by_id(peer);
  4832. }
  4833. }
  4834. }
  4835. /**
  4836. * dp_pdev_configure_monitor_rings() - configure monitor rings
4837. * @pdev: Datapath PDEV handle
4838. *
4839. * Return: QDF_STATUS_SUCCESS on success, error code on failure
  4840. */
  4841. static QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
  4842. {
  4843. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4844. struct dp_soc *soc;
  4845. uint8_t pdev_id;
  4846. int mac_id;
  4847. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4848. pdev_id = pdev->pdev_id;
  4849. soc = pdev->soc;
  4850. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  4851. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  4852. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  4853. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  4854. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  4855. pdev->mo_data_filter);
  4856. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4857. htt_tlv_filter.mpdu_start = 1;
  4858. htt_tlv_filter.msdu_start = 1;
  4859. htt_tlv_filter.packet = 1;
  4860. htt_tlv_filter.msdu_end = 1;
  4861. htt_tlv_filter.mpdu_end = 1;
  4862. htt_tlv_filter.packet_header = 1;
  4863. htt_tlv_filter.attention = 1;
  4864. htt_tlv_filter.ppdu_start = 0;
  4865. htt_tlv_filter.ppdu_end = 0;
  4866. htt_tlv_filter.ppdu_end_user_stats = 0;
  4867. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  4868. htt_tlv_filter.ppdu_end_status_done = 0;
  4869. htt_tlv_filter.header_per_msdu = 1;
  4870. htt_tlv_filter.enable_fp =
  4871. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  4872. htt_tlv_filter.enable_md = 0;
  4873. htt_tlv_filter.enable_mo =
  4874. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  4875. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  4876. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  4877. if (pdev->mcopy_mode)
  4878. htt_tlv_filter.fp_data_filter = 0;
  4879. else
  4880. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  4881. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  4882. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  4883. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  4884. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4885. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4886. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4887. pdev, mac_id,
  4888. htt_tlv_filter);
  4889. if (status != QDF_STATUS_SUCCESS) {
  4890. dp_err("Failed to send tlv filter for monitor mode rings");
  4891. return status;
  4892. }
  4893. }
  4894. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4895. htt_tlv_filter.mpdu_start = 1;
  4896. htt_tlv_filter.msdu_start = 0;
  4897. htt_tlv_filter.packet = 0;
  4898. htt_tlv_filter.msdu_end = 0;
  4899. htt_tlv_filter.mpdu_end = 0;
  4900. htt_tlv_filter.attention = 0;
  4901. htt_tlv_filter.ppdu_start = 1;
  4902. htt_tlv_filter.ppdu_end = 1;
  4903. htt_tlv_filter.ppdu_end_user_stats = 1;
  4904. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  4905. htt_tlv_filter.ppdu_end_status_done = 1;
  4906. htt_tlv_filter.enable_fp = 1;
  4907. htt_tlv_filter.enable_md = 0;
  4908. htt_tlv_filter.enable_mo = 1;
  4909. if (pdev->mcopy_mode) {
  4910. htt_tlv_filter.packet_header = 1;
  4911. }
  4912. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  4913. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  4914. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  4915. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  4916. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  4917. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  4918. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4919. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  4920. pdev->pdev_id);
  4921. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4922. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4923. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  4924. }
  4925. return status;
  4926. }
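/*
 * Note: dp_pdev_configure_monitor_rings() above programs the filters in
 * two passes: the first pass enables the per-MPDU/MSDU and packet TLVs on
 * the monitor buffer/destination path, and the second pass enables only
 * the PPDU start/end status TLVs on the RXDMA monitor status ring.
 */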
  4927. /**
  4928. * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
  4929. * @vdev_handle: Datapath VDEV handle
4930. * @smart_monitor: Flag to denote if it is smart monitor mode
  4931. *
  4932. * Return: 0 on success, not 0 on failure
  4933. */
  4934. static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
  4935. uint8_t smart_monitor)
  4936. {
  4937. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4938. struct dp_pdev *pdev;
  4939. qdf_assert(vdev);
  4940. pdev = vdev->pdev;
  4941. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  4942. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
  4943. pdev, pdev->pdev_id, pdev->soc, vdev);
  4944. /*Check if current pdev's monitor_vdev exists */
  4945. if (pdev->monitor_configured) {
  4946. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  4947. "monitor vap already created vdev=%pK\n", vdev);
  4948. qdf_assert(vdev);
  4949. return QDF_STATUS_E_RESOURCES;
  4950. }
  4951. pdev->monitor_vdev = vdev;
  4952. pdev->monitor_configured = true;
  4953. /* If smart monitor mode, do not configure monitor ring */
  4954. if (smart_monitor)
  4955. return QDF_STATUS_SUCCESS;
  4956. return dp_pdev_configure_monitor_rings(pdev);
  4957. }
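/*
 * Illustrative sketch (assumption, not part of the driver): bringing up a
 * monitor VAP and then narrowing its filter could look like
 *
 *   if (dp_vdev_set_monitor_mode(vdev_handle, 0) == QDF_STATUS_SUCCESS)
 *       dp_pdev_set_advance_monitor_filter(pdev_handle, &filter_val);
 *
 * with smart_monitor passed as 0 so the monitor rings are actually
 * configured; dp_reset_monitor_mode() undoes this when the monitor VAP
 * goes down.
 */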
  4958. /**
  4959. * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
  4960. * @pdev_handle: Datapath PDEV handle
  4961. * @filter_val: Flag to select Filter for monitor mode
  4962. * Return: 0 on success, not 0 on failure
  4963. */
  4964. static QDF_STATUS
  4965. dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
  4966. struct cdp_monitor_filter *filter_val)
  4967. {
4968. /* Many monitor VAPs can exist in a system, but only one can be up at
4969. * any time
  4970. */
  4971. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4972. struct dp_vdev *vdev = pdev->monitor_vdev;
  4973. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4974. struct dp_soc *soc;
  4975. uint8_t pdev_id;
  4976. int mac_id;
  4977. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4978. pdev_id = pdev->pdev_id;
  4979. soc = pdev->soc;
  4980. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  4981. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
  4982. pdev, pdev_id, soc, vdev);
  4983. /*Check if current pdev's monitor_vdev exists */
  4984. if (!pdev->monitor_vdev) {
  4985. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  4986. "vdev=%pK", vdev);
  4987. qdf_assert(vdev);
  4988. }
  4989. /* update filter mode, type in pdev structure */
  4990. pdev->mon_filter_mode = filter_val->mode;
  4991. pdev->fp_mgmt_filter = filter_val->fp_mgmt;
  4992. pdev->fp_ctrl_filter = filter_val->fp_ctrl;
  4993. pdev->fp_data_filter = filter_val->fp_data;
  4994. pdev->mo_mgmt_filter = filter_val->mo_mgmt;
  4995. pdev->mo_ctrl_filter = filter_val->mo_ctrl;
  4996. pdev->mo_data_filter = filter_val->mo_data;
  4997. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  4998. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  4999. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  5000. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  5001. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  5002. pdev->mo_data_filter);
  5003. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  5004. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5005. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  5006. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  5007. pdev, mac_id,
  5008. htt_tlv_filter);
  5009. if (status != QDF_STATUS_SUCCESS) {
  5010. dp_err("Failed to send tlv filter for monitor mode rings");
  5011. return status;
  5012. }
  5013. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5014. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  5015. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  5016. }
  5017. htt_tlv_filter.mpdu_start = 1;
  5018. htt_tlv_filter.msdu_start = 1;
  5019. htt_tlv_filter.packet = 1;
  5020. htt_tlv_filter.msdu_end = 1;
  5021. htt_tlv_filter.mpdu_end = 1;
  5022. htt_tlv_filter.packet_header = 1;
  5023. htt_tlv_filter.attention = 1;
  5024. htt_tlv_filter.ppdu_start = 0;
  5025. htt_tlv_filter.ppdu_end = 0;
  5026. htt_tlv_filter.ppdu_end_user_stats = 0;
  5027. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  5028. htt_tlv_filter.ppdu_end_status_done = 0;
  5029. htt_tlv_filter.header_per_msdu = 1;
  5030. htt_tlv_filter.enable_fp =
  5031. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  5032. htt_tlv_filter.enable_md = 0;
  5033. htt_tlv_filter.enable_mo =
  5034. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  5035. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  5036. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  5037. if (pdev->mcopy_mode)
  5038. htt_tlv_filter.fp_data_filter = 0;
  5039. else
  5040. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  5041. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  5042. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  5043. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  5044. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5045. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  5046. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  5047. pdev, mac_id,
  5048. htt_tlv_filter);
  5049. if (status != QDF_STATUS_SUCCESS) {
  5050. dp_err("Failed to send tlv filter for monitor mode rings");
  5051. return status;
  5052. }
  5053. }
  5054. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  5055. htt_tlv_filter.mpdu_start = 1;
  5056. htt_tlv_filter.msdu_start = 0;
  5057. htt_tlv_filter.packet = 0;
  5058. htt_tlv_filter.msdu_end = 0;
  5059. htt_tlv_filter.mpdu_end = 0;
  5060. htt_tlv_filter.attention = 0;
  5061. htt_tlv_filter.ppdu_start = 1;
  5062. htt_tlv_filter.ppdu_end = 1;
  5063. htt_tlv_filter.ppdu_end_user_stats = 1;
  5064. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  5065. htt_tlv_filter.ppdu_end_status_done = 1;
  5066. htt_tlv_filter.enable_fp = 1;
  5067. htt_tlv_filter.enable_md = 0;
  5068. htt_tlv_filter.enable_mo = 1;
  5069. if (pdev->mcopy_mode) {
  5070. htt_tlv_filter.packet_header = 1;
  5071. }
  5072. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  5073. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  5074. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  5075. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  5076. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  5077. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  5078. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5079. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  5080. pdev->pdev_id);
  5081. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5082. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  5083. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  5084. }
  5085. return QDF_STATUS_SUCCESS;
  5086. }
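/*
 * Illustrative sketch (assumption, not part of the driver): how a caller
 * might populate the filter values consumed by the routine above. The field
 * names mirror the pdev fields updated there; the struct cdp_monitor_filter
 * type and the dp_pdev_set_advance_monitor_filter() name are assumed, since
 * the enclosing function's signature is not visible in this excerpt.
 */
static void dp_example_set_promiscuous_mon_filter(struct cdp_pdev *pdev_handle)
{
	struct cdp_monitor_filter filter_val = {0};

	/* accept filter-pass and filter-other frames of every class */
	filter_val.mode = MON_FILTER_PASS | MON_FILTER_OTHER;
	filter_val.fp_mgmt = FILTER_MGMT_ALL;
	filter_val.fp_ctrl = FILTER_CTRL_ALL;
	filter_val.fp_data = FILTER_DATA_ALL;
	filter_val.mo_mgmt = FILTER_MGMT_ALL;
	filter_val.mo_ctrl = FILTER_CTRL_ALL;
	filter_val.mo_data = FILTER_DATA_ALL;

	dp_pdev_set_advance_monitor_filter(pdev_handle, &filter_val);
}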
  5087. /**
  5088. * dp_get_pdev_id_frm_pdev() - get pdev_id
  5089. * @pdev_handle: Datapath PDEV handle
  5090. *
  5091. * Return: pdev_id
  5092. */
  5093. static
  5094. uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
  5095. {
  5096. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5097. return pdev->pdev_id;
  5098. }
  5099. /**
  5100. * dp_pdev_set_chan_noise_floor() - set channel noise floor
  5101. * @pdev_handle: Datapath PDEV handle
  5102. * @chan_noise_floor: Channel Noise Floor
  5103. *
  5104. * Return: void
  5105. */
  5106. static
  5107. void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
  5108. int16_t chan_noise_floor)
  5109. {
  5110. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5111. pdev->chan_noise_floor = chan_noise_floor;
  5112. }
  5113. /**
  5114. * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
  5115. * @vdev_handle: Datapath VDEV handle
  5116. * Return: true on ucast filter flag set
  5117. */
  5118. static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
  5119. {
  5120. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5121. struct dp_pdev *pdev;
  5122. pdev = vdev->pdev;
  5123. if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
  5124. (pdev->mo_data_filter & FILTER_DATA_UCAST))
  5125. return true;
  5126. return false;
  5127. }
  5128. /**
  5129. * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
  5130. * @vdev_handle: Datapath VDEV handle
  5131. * Return: true on mcast filter flag set
  5132. */
  5133. static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
  5134. {
  5135. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5136. struct dp_pdev *pdev;
  5137. pdev = vdev->pdev;
  5138. if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
  5139. (pdev->mo_data_filter & FILTER_DATA_MCAST))
  5140. return true;
  5141. return false;
  5142. }
  5143. /**
  5144. * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
  5145. * @vdev_handle: Datapath VDEV handle
  5146. * Return: true on non data filter flag set
  5147. */
  5148. static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
  5149. {
  5150. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5151. struct dp_pdev *pdev;
  5152. pdev = vdev->pdev;
  5153. if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
  5154. (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
  5155. if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
  5156. (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
  5157. return true;
  5158. }
  5159. }
  5160. return false;
  5161. }
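/*
 * Illustrative sketch: combining the three monitor-filter getters defined
 * above to report which frame classes the monitor filters currently accept.
 * The wrapper name is hypothetical; the getters and trace macros are the
 * ones already used in this file.
 */
static void dp_example_log_monitor_filter_state(struct cdp_vdev *vdev_handle)
{
	bool ucast = dp_vdev_get_filter_ucast_data(vdev_handle);
	bool mcast = dp_vdev_get_filter_mcast_data(vdev_handle);
	bool non_data = dp_vdev_get_filter_non_data(vdev_handle);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("monitor filters: ucast=%d mcast=%d non_data=%d"),
		  ucast, mcast, non_data);
}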
  5162. #ifdef MESH_MODE_SUPPORT
  5163. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  5164. {
  5165. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5166. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5167. FL("val %d"), val);
  5168. vdev->mesh_vdev = val;
  5169. }
  5170. /*
  5171. * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
  5172. * @vdev_hdl: virtual device object
  5173. * @val: value to be set
  5174. *
  5175. * Return: void
  5176. */
  5177. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  5178. {
  5179. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5180. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5181. FL("val %d"), val);
  5182. vdev->mesh_rx_filter = val;
  5183. }
  5184. #endif
  5185. /*
5186. * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats.
5187. * Current scope is BAR received count
  5188. *
  5189. * @pdev_handle: DP_PDEV handle
  5190. *
  5191. * Return: void
  5192. */
  5193. #define STATS_PROC_TIMEOUT (HZ/1000)
  5194. static void
  5195. dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
  5196. {
  5197. struct dp_vdev *vdev;
  5198. struct dp_peer *peer;
  5199. uint32_t waitcnt;
  5200. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5201. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5202. if (!peer) {
  5203. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5204. FL("DP Invalid Peer refernce"));
  5205. return;
  5206. }
  5207. if (peer->delete_in_progress) {
  5208. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5209. FL("DP Peer deletion in progress"));
  5210. continue;
  5211. }
  5212. qdf_atomic_inc(&peer->ref_cnt);
  5213. waitcnt = 0;
  5214. dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
  5215. while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
  5216. && waitcnt < 10) {
  5217. schedule_timeout_interruptible(
  5218. STATS_PROC_TIMEOUT);
  5219. waitcnt++;
  5220. }
  5221. qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
  5222. dp_peer_unref_delete(peer);
  5223. }
  5224. }
  5225. }
  5226. /**
  5227. * dp_rx_bar_stats_cb(): BAR received stats callback
  5228. * @soc: SOC handle
  5229. * @cb_ctxt: Call back context
  5230. * @reo_status: Reo status
  5231. *
  5232. * return: void
  5233. */
  5234. void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  5235. union hal_reo_status *reo_status)
  5236. {
  5237. struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
  5238. struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
  5239. if (!qdf_atomic_read(&soc->cmn_init_done))
  5240. return;
  5241. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
  5242. DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
  5243. queue_status->header.status);
  5244. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  5245. return;
  5246. }
  5247. pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
  5248. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  5249. }
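/*
 * Illustrative sketch of the handshake used with dp_rx_bar_stats_cb() above:
 * the caller issues the REO queue-stats command through dp_peer_rxtid_stats()
 * with this callback, then polls pdev->stats_cmd_complete, which the callback
 * sets once bar_rcvd_cnt has been folded into the pdev stats. This mirrors
 * the loop in dp_aggregate_pdev_ctrl_frames_stats(); the helper name is
 * hypothetical.
 */
static void dp_example_collect_bar_stats(struct dp_pdev *pdev,
					 struct dp_peer *peer)
{
	uint32_t waitcnt = 0;

	dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
	while (!qdf_atomic_read(&pdev->stats_cmd_complete) && waitcnt < 10) {
		schedule_timeout_interruptible(STATS_PROC_TIMEOUT);
		waitcnt++;
	}
	qdf_atomic_set(&pdev->stats_cmd_complete, 0);
}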
  5250. /**
  5251. * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
  5252. * @vdev: DP VDEV handle
5253. * @vdev_stats: buffer to hold the consolidated stats
  5254. * return: void
  5255. */
  5256. void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
  5257. struct cdp_vdev_stats *vdev_stats)
  5258. {
  5259. struct dp_peer *peer = NULL;
  5260. struct dp_soc *soc = NULL;
  5261. if (!vdev || !vdev->pdev)
  5262. return;
  5263. soc = vdev->pdev->soc;
  5264. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  5265. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
  5266. dp_update_vdev_stats(vdev_stats, peer);
  5267. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5268. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5269. vdev_stats, vdev->vdev_id,
  5270. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  5271. #endif
  5272. }
  5273. /**
  5274. * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
  5275. * @pdev: DP PDEV handle
  5276. *
  5277. * return: void
  5278. */
  5279. static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
  5280. {
  5281. struct dp_vdev *vdev = NULL;
  5282. struct dp_soc *soc;
  5283. struct cdp_vdev_stats *vdev_stats =
  5284. qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  5285. if (!vdev_stats) {
  5286. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5287. "DP alloc failure - unable to get alloc vdev stats");
  5288. return;
  5289. }
  5290. qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
  5291. qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
  5292. qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
  5293. if (pdev->mcopy_mode)
  5294. DP_UPDATE_STATS(pdev, pdev->invalid_peer);
  5295. soc = pdev->soc;
  5296. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  5297. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  5298. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5299. dp_aggregate_vdev_stats(vdev, vdev_stats);
  5300. dp_update_pdev_stats(pdev, vdev_stats);
  5301. dp_update_pdev_ingress_stats(pdev, vdev);
  5302. }
  5303. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  5304. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  5305. qdf_mem_free(vdev_stats);
  5306. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5307. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
  5308. pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
  5309. #endif
  5310. }
  5311. /**
  5312. * dp_vdev_getstats() - get vdev packet level stats
  5313. * @vdev_handle: Datapath VDEV handle
  5314. * @stats: cdp network device stats structure
  5315. *
  5316. * Return: void
  5317. */
  5318. static void dp_vdev_getstats(void *vdev_handle,
  5319. struct cdp_dev_stats *stats)
  5320. {
  5321. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5322. struct dp_pdev *pdev;
  5323. struct dp_soc *soc;
  5324. struct cdp_vdev_stats *vdev_stats;
  5325. if (!vdev)
  5326. return;
  5327. pdev = vdev->pdev;
  5328. if (!pdev)
  5329. return;
  5330. soc = pdev->soc;
  5331. vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  5332. if (!vdev_stats) {
  5333. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5334. "DP alloc failure - unable to get alloc vdev stats");
  5335. return;
  5336. }
  5337. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  5338. dp_aggregate_vdev_stats(vdev, vdev_stats);
  5339. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  5340. stats->tx_packets = vdev_stats->tx_i.rcvd.num;
  5341. stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
  5342. stats->tx_errors = vdev_stats->tx.tx_failed +
  5343. vdev_stats->tx_i.dropped.dropped_pkt.num;
  5344. stats->tx_dropped = stats->tx_errors;
  5345. stats->rx_packets = vdev_stats->rx.unicast.num +
  5346. vdev_stats->rx.multicast.num +
  5347. vdev_stats->rx.bcast.num;
  5348. stats->rx_bytes = vdev_stats->rx.unicast.bytes +
  5349. vdev_stats->rx.multicast.bytes +
  5350. vdev_stats->rx.bcast.bytes;
  5351. }
  5352. /**
  5353. * dp_pdev_getstats() - get pdev packet level stats
  5354. * @pdev_handle: Datapath PDEV handle
  5355. * @stats: cdp network device stats structure
  5356. *
  5357. * Return: void
  5358. */
  5359. static void dp_pdev_getstats(void *pdev_handle,
  5360. struct cdp_dev_stats *stats)
  5361. {
  5362. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5363. dp_aggregate_pdev_stats(pdev);
  5364. stats->tx_packets = pdev->stats.tx_i.rcvd.num;
  5365. stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
  5366. stats->tx_errors = pdev->stats.tx.tx_failed +
  5367. pdev->stats.tx_i.dropped.dropped_pkt.num;
  5368. stats->tx_dropped = stats->tx_errors;
  5369. stats->rx_packets = pdev->stats.rx.unicast.num +
  5370. pdev->stats.rx.multicast.num +
  5371. pdev->stats.rx.bcast.num;
  5372. stats->rx_bytes = pdev->stats.rx.unicast.bytes +
  5373. pdev->stats.rx.multicast.bytes +
  5374. pdev->stats.rx.bcast.bytes;
  5375. }
  5376. /**
  5377. * dp_get_device_stats() - get interface level packet stats
  5378. * @handle: device handle
  5379. * @stats: cdp network device stats structure
  5380. * @type: device type pdev/vdev
  5381. *
  5382. * Return: void
  5383. */
  5384. static void dp_get_device_stats(void *handle,
  5385. struct cdp_dev_stats *stats, uint8_t type)
  5386. {
  5387. switch (type) {
  5388. case UPDATE_VDEV_STATS:
  5389. dp_vdev_getstats(handle, stats);
  5390. break;
  5391. case UPDATE_PDEV_STATS:
  5392. dp_pdev_getstats(handle, stats);
  5393. break;
  5394. default:
  5395. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  5396. "apstats cannot be updated for this input "
  5397. "type %d", type);
  5398. break;
  5399. }
  5400. }
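/*
 * Illustrative sketch: fetching interface-level counters through
 * dp_get_device_stats() above. UPDATE_VDEV_STATS selects the vdev path of
 * the switch; the wrapper name is hypothetical.
 */
static void dp_example_get_vdev_dev_stats(struct cdp_vdev *vdev_handle)
{
	struct cdp_dev_stats dev_stats;

	qdf_mem_set(&dev_stats, sizeof(dev_stats), 0x0);
	dp_get_device_stats(vdev_handle, &dev_stats, UPDATE_VDEV_STATS);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("tx_packets=%u rx_packets=%u"),
		  dev_stats.tx_packets, dev_stats.rx_packets);
}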
  5401. /**
  5402. * dp_print_pdev_tx_stats(): Print Pdev level TX stats
  5403. * @pdev: DP_PDEV Handle
  5404. *
  5405. * Return:void
  5406. */
  5407. static inline void
  5408. dp_print_pdev_tx_stats(struct dp_pdev *pdev)
  5409. {
  5410. uint8_t index = 0;
  5411. DP_PRINT_STATS("PDEV Tx Stats:\n");
  5412. DP_PRINT_STATS("Received From Stack:");
  5413. DP_PRINT_STATS(" Packets = %d",
  5414. pdev->stats.tx_i.rcvd.num);
  5415. DP_PRINT_STATS(" Bytes = %llu",
  5416. pdev->stats.tx_i.rcvd.bytes);
  5417. DP_PRINT_STATS("Processed:");
  5418. DP_PRINT_STATS(" Packets = %d",
  5419. pdev->stats.tx_i.processed.num);
  5420. DP_PRINT_STATS(" Bytes = %llu",
  5421. pdev->stats.tx_i.processed.bytes);
  5422. DP_PRINT_STATS("Total Completions:");
  5423. DP_PRINT_STATS(" Packets = %u",
  5424. pdev->stats.tx.comp_pkt.num);
  5425. DP_PRINT_STATS(" Bytes = %llu",
  5426. pdev->stats.tx.comp_pkt.bytes);
  5427. DP_PRINT_STATS("Successful Completions:");
  5428. DP_PRINT_STATS(" Packets = %u",
  5429. pdev->stats.tx.tx_success.num);
  5430. DP_PRINT_STATS(" Bytes = %llu",
  5431. pdev->stats.tx.tx_success.bytes);
  5432. DP_PRINT_STATS("Dropped:");
  5433. DP_PRINT_STATS(" Total = %d",
  5434. pdev->stats.tx_i.dropped.dropped_pkt.num);
  5435. DP_PRINT_STATS(" Dma_map_error = %d",
  5436. pdev->stats.tx_i.dropped.dma_error);
  5437. DP_PRINT_STATS(" Ring Full = %d",
  5438. pdev->stats.tx_i.dropped.ring_full);
  5439. DP_PRINT_STATS(" Descriptor Not available = %d",
  5440. pdev->stats.tx_i.dropped.desc_na.num);
  5441. DP_PRINT_STATS(" HW enqueue failed= %d",
  5442. pdev->stats.tx_i.dropped.enqueue_fail);
  5443. DP_PRINT_STATS(" Resources Full = %d",
  5444. pdev->stats.tx_i.dropped.res_full);
  5445. DP_PRINT_STATS(" FW removed Pkts = %u",
  5446. pdev->stats.tx.dropped.fw_rem.num);
  5447. DP_PRINT_STATS(" FW removed bytes= %llu",
  5448. pdev->stats.tx.dropped.fw_rem.bytes);
  5449. DP_PRINT_STATS(" FW removed transmitted = %d",
  5450. pdev->stats.tx.dropped.fw_rem_tx);
  5451. DP_PRINT_STATS(" FW removed untransmitted = %d",
  5452. pdev->stats.tx.dropped.fw_rem_notx);
  5453. DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d",
  5454. pdev->stats.tx.dropped.fw_reason1);
  5455. DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d",
  5456. pdev->stats.tx.dropped.fw_reason2);
  5457. DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d",
  5458. pdev->stats.tx.dropped.fw_reason3);
  5459. DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
  5460. pdev->stats.tx.dropped.age_out);
  5461. DP_PRINT_STATS(" headroom insufficient = %d",
  5462. pdev->stats.tx_i.dropped.headroom_insufficient);
  5463. DP_PRINT_STATS(" Multicast:");
  5464. DP_PRINT_STATS(" Packets: %u",
  5465. pdev->stats.tx.mcast.num);
  5466. DP_PRINT_STATS(" Bytes: %llu",
  5467. pdev->stats.tx.mcast.bytes);
  5468. DP_PRINT_STATS("Scatter Gather:");
  5469. DP_PRINT_STATS(" Packets = %d",
  5470. pdev->stats.tx_i.sg.sg_pkt.num);
  5471. DP_PRINT_STATS(" Bytes = %llu",
  5472. pdev->stats.tx_i.sg.sg_pkt.bytes);
  5473. DP_PRINT_STATS(" Dropped By Host = %d",
  5474. pdev->stats.tx_i.sg.dropped_host.num);
  5475. DP_PRINT_STATS(" Dropped By Target = %d",
  5476. pdev->stats.tx_i.sg.dropped_target);
  5477. DP_PRINT_STATS("TSO:");
  5478. DP_PRINT_STATS(" Number of Segments = %d",
  5479. pdev->stats.tx_i.tso.num_seg);
  5480. DP_PRINT_STATS(" Packets = %d",
  5481. pdev->stats.tx_i.tso.tso_pkt.num);
  5482. DP_PRINT_STATS(" Bytes = %llu",
  5483. pdev->stats.tx_i.tso.tso_pkt.bytes);
  5484. DP_PRINT_STATS(" Dropped By Host = %d",
  5485. pdev->stats.tx_i.tso.dropped_host.num);
  5486. DP_PRINT_STATS("Mcast Enhancement:");
  5487. DP_PRINT_STATS(" Packets = %d",
  5488. pdev->stats.tx_i.mcast_en.mcast_pkt.num);
  5489. DP_PRINT_STATS(" Bytes = %llu",
  5490. pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
  5491. DP_PRINT_STATS(" Dropped: Map Errors = %d",
  5492. pdev->stats.tx_i.mcast_en.dropped_map_error);
  5493. DP_PRINT_STATS(" Dropped: Self Mac = %d",
  5494. pdev->stats.tx_i.mcast_en.dropped_self_mac);
  5495. DP_PRINT_STATS(" Dropped: Send Fail = %d",
  5496. pdev->stats.tx_i.mcast_en.dropped_send_fail);
  5497. DP_PRINT_STATS(" Unicast sent = %d",
  5498. pdev->stats.tx_i.mcast_en.ucast);
  5499. DP_PRINT_STATS("Raw:");
  5500. DP_PRINT_STATS(" Packets = %d",
  5501. pdev->stats.tx_i.raw.raw_pkt.num);
  5502. DP_PRINT_STATS(" Bytes = %llu",
  5503. pdev->stats.tx_i.raw.raw_pkt.bytes);
  5504. DP_PRINT_STATS(" DMA map error = %d",
  5505. pdev->stats.tx_i.raw.dma_map_error);
  5506. DP_PRINT_STATS("Reinjected:");
  5507. DP_PRINT_STATS(" Packets = %d",
  5508. pdev->stats.tx_i.reinject_pkts.num);
  5509. DP_PRINT_STATS(" Bytes = %llu\n",
  5510. pdev->stats.tx_i.reinject_pkts.bytes);
  5511. DP_PRINT_STATS("Inspected:");
  5512. DP_PRINT_STATS(" Packets = %d",
  5513. pdev->stats.tx_i.inspect_pkts.num);
  5514. DP_PRINT_STATS(" Bytes = %llu",
  5515. pdev->stats.tx_i.inspect_pkts.bytes);
  5516. DP_PRINT_STATS("Nawds Multicast:");
  5517. DP_PRINT_STATS(" Packets = %d",
  5518. pdev->stats.tx_i.nawds_mcast.num);
  5519. DP_PRINT_STATS(" Bytes = %llu",
  5520. pdev->stats.tx_i.nawds_mcast.bytes);
  5521. DP_PRINT_STATS("CCE Classified:");
  5522. DP_PRINT_STATS(" CCE Classified Packets: %u",
  5523. pdev->stats.tx_i.cce_classified);
  5524. DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
  5525. pdev->stats.tx_i.cce_classified_raw);
  5526. DP_PRINT_STATS("Mesh stats:");
  5527. DP_PRINT_STATS(" frames to firmware: %u",
  5528. pdev->stats.tx_i.mesh.exception_fw);
  5529. DP_PRINT_STATS(" completions from fw: %u",
  5530. pdev->stats.tx_i.mesh.completion_fw);
  5531. DP_PRINT_STATS("PPDU stats counter");
  5532. for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
  5533. DP_PRINT_STATS(" Tag[%d] = %llu", index,
  5534. pdev->stats.ppdu_stats_counter[index]);
  5535. }
  5536. }
  5537. /**
  5538. * dp_print_pdev_rx_stats(): Print Pdev level RX stats
  5539. * @pdev: DP_PDEV Handle
  5540. *
  5541. * Return: void
  5542. */
  5543. static inline void
  5544. dp_print_pdev_rx_stats(struct dp_pdev *pdev)
  5545. {
  5546. DP_PRINT_STATS("PDEV Rx Stats:\n");
  5547. DP_PRINT_STATS("Received From HW (Per Rx Ring):");
  5548. DP_PRINT_STATS(" Packets = %d %d %d %d",
  5549. pdev->stats.rx.rcvd_reo[0].num,
  5550. pdev->stats.rx.rcvd_reo[1].num,
  5551. pdev->stats.rx.rcvd_reo[2].num,
  5552. pdev->stats.rx.rcvd_reo[3].num);
  5553. DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
  5554. pdev->stats.rx.rcvd_reo[0].bytes,
  5555. pdev->stats.rx.rcvd_reo[1].bytes,
  5556. pdev->stats.rx.rcvd_reo[2].bytes,
  5557. pdev->stats.rx.rcvd_reo[3].bytes);
  5558. DP_PRINT_STATS("Replenished:");
  5559. DP_PRINT_STATS(" Packets = %d",
  5560. pdev->stats.replenish.pkts.num);
  5561. DP_PRINT_STATS(" Bytes = %llu",
  5562. pdev->stats.replenish.pkts.bytes);
  5563. DP_PRINT_STATS(" Buffers Added To Freelist = %d",
  5564. pdev->stats.buf_freelist);
  5565. DP_PRINT_STATS(" Low threshold intr = %d",
  5566. pdev->stats.replenish.low_thresh_intrs);
  5567. DP_PRINT_STATS("Dropped:");
  5568. DP_PRINT_STATS(" msdu_not_done = %d",
  5569. pdev->stats.dropped.msdu_not_done);
  5570. DP_PRINT_STATS(" mon_rx_drop = %d",
  5571. pdev->stats.dropped.mon_rx_drop);
  5572. DP_PRINT_STATS(" mec_drop = %d",
  5573. pdev->stats.rx.mec_drop.num);
  5574. DP_PRINT_STATS(" Bytes = %llu",
  5575. pdev->stats.rx.mec_drop.bytes);
  5576. DP_PRINT_STATS("Sent To Stack:");
  5577. DP_PRINT_STATS(" Packets = %d",
  5578. pdev->stats.rx.to_stack.num);
  5579. DP_PRINT_STATS(" Bytes = %llu",
  5580. pdev->stats.rx.to_stack.bytes);
  5581. DP_PRINT_STATS("Multicast/Broadcast:");
  5582. DP_PRINT_STATS(" Packets = %d",
  5583. pdev->stats.rx.multicast.num);
  5584. DP_PRINT_STATS(" Bytes = %llu",
  5585. pdev->stats.rx.multicast.bytes);
  5586. DP_PRINT_STATS("Errors:");
  5587. DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d",
  5588. pdev->stats.replenish.rxdma_err);
  5589. DP_PRINT_STATS(" Desc Alloc Failed: = %d",
  5590. pdev->stats.err.desc_alloc_fail);
  5591. DP_PRINT_STATS(" IP checksum error = %d",
  5592. pdev->stats.err.ip_csum_err);
  5593. DP_PRINT_STATS(" TCP/UDP checksum error = %d",
  5594. pdev->stats.err.tcp_udp_csum_err);
  5595. /* Get bar_recv_cnt */
  5596. dp_aggregate_pdev_ctrl_frames_stats(pdev);
  5597. DP_PRINT_STATS("BAR Received Count: = %d",
  5598. pdev->stats.rx.bar_recv_cnt);
  5599. }
  5600. /**
  5601. * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
  5602. * @pdev: DP_PDEV Handle
  5603. *
  5604. * Return: void
  5605. */
  5606. static inline void
  5607. dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
  5608. {
  5609. struct cdp_pdev_mon_stats *rx_mon_stats;
  5610. rx_mon_stats = &pdev->rx_mon_stats;
  5611. DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
  5612. dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
  5613. DP_PRINT_STATS("status_ppdu_done_cnt = %d",
  5614. rx_mon_stats->status_ppdu_done);
  5615. DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
  5616. rx_mon_stats->dest_ppdu_done);
  5617. DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
  5618. rx_mon_stats->dest_mpdu_done);
  5619. DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
  5620. rx_mon_stats->dest_mpdu_drop);
  5621. DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
  5622. rx_mon_stats->dup_mon_linkdesc_cnt);
  5623. DP_PRINT_STATS("dup_mon_buf_cnt = %d",
  5624. rx_mon_stats->dup_mon_buf_cnt);
  5625. }
  5626. /**
  5627. * dp_print_soc_tx_stats(): Print SOC level stats
5628. * @soc: DP_SOC Handle
  5629. *
  5630. * Return: void
  5631. */
  5632. static inline void
  5633. dp_print_soc_tx_stats(struct dp_soc *soc)
  5634. {
  5635. uint8_t desc_pool_id;
  5636. soc->stats.tx.desc_in_use = 0;
  5637. DP_PRINT_STATS("SOC Tx Stats:\n");
  5638. for (desc_pool_id = 0;
  5639. desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5640. desc_pool_id++)
  5641. soc->stats.tx.desc_in_use +=
  5642. soc->tx_desc[desc_pool_id].num_allocated;
  5643. DP_PRINT_STATS("Tx Descriptors In Use = %d",
  5644. soc->stats.tx.desc_in_use);
  5645. DP_PRINT_STATS("Tx Invalid peer:");
  5646. DP_PRINT_STATS(" Packets = %d",
  5647. soc->stats.tx.tx_invalid_peer.num);
  5648. DP_PRINT_STATS(" Bytes = %llu",
  5649. soc->stats.tx.tx_invalid_peer.bytes);
  5650. DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
  5651. soc->stats.tx.tcl_ring_full[0],
  5652. soc->stats.tx.tcl_ring_full[1],
  5653. soc->stats.tx.tcl_ring_full[2]);
  5654. }
  5655. /**
5656. * dp_print_soc_rx_stats(): Print SOC level Rx stats
  5657. * @soc: DP_SOC Handle
  5658. *
  5659. * Return:void
  5660. */
  5661. static inline void
  5662. dp_print_soc_rx_stats(struct dp_soc *soc)
  5663. {
  5664. uint32_t i;
  5665. char reo_error[DP_REO_ERR_LENGTH];
  5666. char rxdma_error[DP_RXDMA_ERR_LENGTH];
  5667. uint8_t index = 0;
  5668. DP_PRINT_STATS("No of AST Entries = %d", soc->num_ast_entries);
  5669. DP_PRINT_STATS("SOC Rx Stats:\n");
  5670. DP_PRINT_STATS("Fragmented packets: %u",
  5671. soc->stats.rx.rx_frags);
  5672. DP_PRINT_STATS("Reo reinjected packets: %u",
  5673. soc->stats.rx.reo_reinject);
  5674. DP_PRINT_STATS("Errors:\n");
  5675. DP_PRINT_STATS("Rx Decrypt Errors = %d",
  5676. (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
  5677. soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
  5678. DP_PRINT_STATS("Invalid RBM = %d",
  5679. soc->stats.rx.err.invalid_rbm);
  5680. DP_PRINT_STATS("Invalid Vdev = %d",
  5681. soc->stats.rx.err.invalid_vdev);
  5682. DP_PRINT_STATS("Invalid Pdev = %d",
  5683. soc->stats.rx.err.invalid_pdev);
  5684. DP_PRINT_STATS("Invalid Peer = %d",
  5685. soc->stats.rx.err.rx_invalid_peer.num);
  5686. DP_PRINT_STATS("HAL Ring Access Fail = %d",
  5687. soc->stats.rx.err.hal_ring_access_fail);
  5688. DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
  5689. DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
  5690. DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
  5691. DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
  5692. DP_PRINT_STATS("RX DUP DESC: %d",
  5693. soc->stats.rx.err.hal_reo_dest_dup);
  5694. DP_PRINT_STATS("RX REL DUP DESC: %d",
  5695. soc->stats.rx.err.hal_wbm_rel_dup);
  5696. for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
  5697. index += qdf_snprint(&rxdma_error[index],
  5698. DP_RXDMA_ERR_LENGTH - index,
  5699. " %d", soc->stats.rx.err.rxdma_error[i]);
  5700. }
  5701. DP_PRINT_STATS("RXDMA Error (0-31):%s",
  5702. rxdma_error);
  5703. index = 0;
  5704. for (i = 0; i < HAL_REO_ERR_MAX; i++) {
  5705. index += qdf_snprint(&reo_error[index],
  5706. DP_REO_ERR_LENGTH - index,
  5707. " %d", soc->stats.rx.err.reo_error[i]);
  5708. }
  5709. DP_PRINT_STATS("REO Error(0-14):%s",
  5710. reo_error);
  5711. }
  5712. /**
5713. * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
5714. * @ring_type: ring type
  5715. *
  5716. * Return: char const pointer
  5717. */
  5718. static inline const
  5719. char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
  5720. {
  5721. switch (ring_type) {
  5722. case REO_DST:
  5723. return "Reo_dst";
  5724. case REO_EXCEPTION:
  5725. return "Reo_exception";
  5726. case REO_CMD:
  5727. return "Reo_cmd";
  5728. case REO_REINJECT:
  5729. return "Reo_reinject";
  5730. case REO_STATUS:
  5731. return "Reo_status";
  5732. case WBM2SW_RELEASE:
  5733. return "wbm2sw_release";
  5734. case TCL_DATA:
  5735. return "tcl_data";
  5736. case TCL_CMD:
  5737. return "tcl_cmd";
  5738. case TCL_STATUS:
  5739. return "tcl_status";
  5740. case SW2WBM_RELEASE:
  5741. return "sw2wbm_release";
  5742. case RXDMA_BUF:
  5743. return "Rxdma_buf";
  5744. case RXDMA_DST:
  5745. return "Rxdma_dst";
  5746. case RXDMA_MONITOR_BUF:
  5747. return "Rxdma_monitor_buf";
  5748. case RXDMA_MONITOR_DESC:
  5749. return "Rxdma_monitor_desc";
  5750. case RXDMA_MONITOR_STATUS:
  5751. return "Rxdma_monitor_status";
  5752. default:
  5753. dp_err("Invalid ring type");
  5754. break;
  5755. }
  5756. return "Invalid";
  5757. }
  5758. /**
  5759. * dp_print_ring_stat_from_hal(): Print hal level ring stats
  5760. * @soc: DP_SOC handle
  5761. * @srng: DP_SRNG handle
5762. * @ring_type: SRNG ring type; the printable ring name is derived from it
  5764. *
  5765. * Return: void
  5766. */
  5767. static void
  5768. dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
  5769. enum hal_ring_type ring_type)
  5770. {
  5771. uint32_t tailp;
  5772. uint32_t headp;
  5773. int32_t hw_headp = -1;
  5774. int32_t hw_tailp = -1;
  5775. const char *ring_name;
  5776. struct hal_soc *hal_soc;
  5777. if (soc && srng && srng->hal_srng) {
  5778. hal_soc = (struct hal_soc *)soc->hal_soc;
  5779. ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
  5780. hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
  5781. DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
  5782. ring_name, headp, tailp);
  5783. hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
  5784. &hw_tailp, ring_type);
  5785. DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
  5786. ring_name, hw_headp, hw_tailp);
  5787. }
  5788. }
  5789. /**
5790. * dp_print_mon_ring_stat_from_hal() - Print stats for monitor rings based
  5791. * on target
  5792. * @pdev: physical device handle
  5793. * @mac_id: mac id
  5794. *
  5795. * Return: void
  5796. */
  5797. static inline
  5798. void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
  5799. {
  5800. if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
  5801. dp_print_ring_stat_from_hal(pdev->soc,
  5802. &pdev->rxdma_mon_buf_ring[mac_id],
  5803. RXDMA_MONITOR_BUF);
  5804. dp_print_ring_stat_from_hal(pdev->soc,
  5805. &pdev->rxdma_mon_dst_ring[mac_id],
  5806. RXDMA_MONITOR_DST);
  5807. dp_print_ring_stat_from_hal(pdev->soc,
  5808. &pdev->rxdma_mon_desc_ring[mac_id],
  5809. RXDMA_MONITOR_DESC);
  5810. }
  5811. dp_print_ring_stat_from_hal(pdev->soc,
  5812. &pdev->rxdma_mon_status_ring[mac_id],
  5813. RXDMA_MONITOR_STATUS);
  5814. }
  5815. /**
  5816. * dp_print_ring_stats(): Print tail and head pointer
  5817. * @pdev: DP_PDEV handle
  5818. *
  5819. * Return:void
  5820. */
  5821. static inline void
  5822. dp_print_ring_stats(struct dp_pdev *pdev)
  5823. {
  5824. uint32_t i;
  5825. int mac_id;
  5826. dp_print_ring_stat_from_hal(pdev->soc,
  5827. &pdev->soc->reo_exception_ring,
  5828. REO_EXCEPTION);
  5829. dp_print_ring_stat_from_hal(pdev->soc,
  5830. &pdev->soc->reo_reinject_ring,
  5831. REO_REINJECT);
  5832. dp_print_ring_stat_from_hal(pdev->soc,
  5833. &pdev->soc->reo_cmd_ring,
  5834. REO_CMD);
  5835. dp_print_ring_stat_from_hal(pdev->soc,
  5836. &pdev->soc->reo_status_ring,
  5837. REO_STATUS);
  5838. dp_print_ring_stat_from_hal(pdev->soc,
  5839. &pdev->soc->rx_rel_ring,
  5840. WBM2SW_RELEASE);
  5841. dp_print_ring_stat_from_hal(pdev->soc,
  5842. &pdev->soc->tcl_cmd_ring,
  5843. TCL_CMD);
  5844. dp_print_ring_stat_from_hal(pdev->soc,
  5845. &pdev->soc->tcl_status_ring,
  5846. TCL_STATUS);
  5847. dp_print_ring_stat_from_hal(pdev->soc,
  5848. &pdev->soc->wbm_desc_rel_ring,
  5849. SW2WBM_RELEASE);
  5850. for (i = 0; i < MAX_REO_DEST_RINGS; i++)
  5851. dp_print_ring_stat_from_hal(pdev->soc,
  5852. &pdev->soc->reo_dest_ring[i],
  5853. REO_DST);
  5854. for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
  5855. dp_print_ring_stat_from_hal(pdev->soc,
  5856. &pdev->soc->tcl_data_ring[i],
  5857. TCL_DATA);
  5858. for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
  5859. dp_print_ring_stat_from_hal(pdev->soc,
  5860. &pdev->soc->tx_comp_ring[i],
  5861. WBM2SW_RELEASE);
  5862. dp_print_ring_stat_from_hal(pdev->soc,
  5863. &pdev->rx_refill_buf_ring,
  5864. RXDMA_BUF);
  5865. dp_print_ring_stat_from_hal(pdev->soc,
  5866. &pdev->rx_refill_buf_ring2,
  5867. RXDMA_BUF);
  5868. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  5869. dp_print_ring_stat_from_hal(pdev->soc,
  5870. &pdev->rx_mac_buf_ring[i],
  5871. RXDMA_BUF);
  5872. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
  5873. dp_print_mon_ring_stat_from_hal(pdev, mac_id);
  5874. for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
  5875. dp_print_ring_stat_from_hal(pdev->soc,
  5876. &pdev->rxdma_err_dst_ring[i],
  5877. RXDMA_DST);
  5878. }
  5879. /**
  5880. * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
  5881. * @vdev: DP_VDEV handle
  5882. *
  5883. * Return:void
  5884. */
  5885. static inline void
  5886. dp_txrx_host_stats_clr(struct dp_vdev *vdev)
  5887. {
  5888. struct dp_peer *peer = NULL;
  5889. if (!vdev || !vdev->pdev)
  5890. return;
  5891. DP_STATS_CLR(vdev->pdev);
  5892. DP_STATS_CLR(vdev->pdev->soc);
  5893. DP_STATS_CLR(vdev);
  5894. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5895. if (!peer)
  5896. return;
  5897. DP_STATS_CLR(peer);
  5898. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5899. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5900. &peer->stats, peer->peer_ids[0],
  5901. UPDATE_PEER_STATS, vdev->pdev->pdev_id);
  5902. #endif
  5903. }
  5904. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5905. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5906. &vdev->stats, vdev->vdev_id,
  5907. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  5908. #endif
  5909. }
  5910. /**
  5911. * dp_print_common_rates_info(): Print common rate for tx or rx
  5912. * @pkt_type_array: rate type array contains rate info
  5913. *
  5914. * Return:void
  5915. */
  5916. static inline void
  5917. dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
  5918. {
  5919. uint8_t mcs, pkt_type;
  5920. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  5921. for (mcs = 0; mcs < MAX_MCS; mcs++) {
  5922. if (!dp_rate_string[pkt_type][mcs].valid)
  5923. continue;
  5924. DP_PRINT_STATS(" %s = %d",
  5925. dp_rate_string[pkt_type][mcs].mcs_type,
  5926. pkt_type_array[pkt_type].mcs_count[mcs]);
  5927. }
  5928. DP_PRINT_STATS("\n");
  5929. }
  5930. }
  5931. /**
  5932. * dp_print_rx_rates(): Print Rx rate stats
  5933. * @vdev: DP_VDEV handle
  5934. *
  5935. * Return:void
  5936. */
  5937. static inline void
  5938. dp_print_rx_rates(struct dp_vdev *vdev)
  5939. {
  5940. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  5941. uint8_t i;
  5942. uint8_t index = 0;
  5943. char nss[DP_NSS_LENGTH];
  5944. DP_PRINT_STATS("Rx Rate Info:\n");
  5945. dp_print_common_rates_info(pdev->stats.rx.pkt_type);
  5946. index = 0;
  5947. for (i = 0; i < SS_COUNT; i++) {
  5948. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  5949. " %d", pdev->stats.rx.nss[i]);
  5950. }
  5951. DP_PRINT_STATS("NSS(1-8) = %s",
  5952. nss);
  5953. DP_PRINT_STATS("SGI ="
  5954. " 0.8us %d,"
  5955. " 0.4us %d,"
  5956. " 1.6us %d,"
  5957. " 3.2us %d,",
  5958. pdev->stats.rx.sgi_count[0],
  5959. pdev->stats.rx.sgi_count[1],
  5960. pdev->stats.rx.sgi_count[2],
  5961. pdev->stats.rx.sgi_count[3]);
  5962. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  5963. pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
  5964. pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
  5965. DP_PRINT_STATS("Reception Type ="
  5966. " SU: %d,"
  5967. " MU_MIMO:%d,"
  5968. " MU_OFDMA:%d,"
  5969. " MU_OFDMA_MIMO:%d\n",
  5970. pdev->stats.rx.reception_type[0],
  5971. pdev->stats.rx.reception_type[1],
  5972. pdev->stats.rx.reception_type[2],
  5973. pdev->stats.rx.reception_type[3]);
  5974. DP_PRINT_STATS("Aggregation:\n");
  5975. DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
  5976. pdev->stats.rx.ampdu_cnt);
  5977. DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
  5978. pdev->stats.rx.non_ampdu_cnt);
  5979. DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
  5980. pdev->stats.rx.amsdu_cnt);
  5981. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
  5982. pdev->stats.rx.non_amsdu_cnt);
  5983. }
  5984. /**
  5985. * dp_print_tx_rates(): Print tx rates
  5986. * @vdev: DP_VDEV handle
  5987. *
  5988. * Return:void
  5989. */
  5990. static inline void
  5991. dp_print_tx_rates(struct dp_vdev *vdev)
  5992. {
  5993. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  5994. uint8_t index;
  5995. char nss[DP_NSS_LENGTH];
  5996. int nss_index;
  5997. DP_PRINT_STATS("Tx Rate Info:\n");
  5998. dp_print_common_rates_info(pdev->stats.tx.pkt_type);
  5999. DP_PRINT_STATS("SGI ="
  6000. " 0.8us %d"
  6001. " 0.4us %d"
  6002. " 1.6us %d"
  6003. " 3.2us %d",
  6004. pdev->stats.tx.sgi_count[0],
  6005. pdev->stats.tx.sgi_count[1],
  6006. pdev->stats.tx.sgi_count[2],
  6007. pdev->stats.tx.sgi_count[3]);
  6008. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  6009. pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
  6010. pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
  6011. index = 0;
  6012. for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
  6013. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6014. " %d", pdev->stats.tx.nss[nss_index]);
  6015. }
  6016. DP_PRINT_STATS("NSS(1-8) = %s", nss);
  6017. DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
  6018. DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
  6019. DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
  6020. DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
  6021. DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
  6022. DP_PRINT_STATS("Aggregation:\n");
  6023. DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
  6024. pdev->stats.tx.amsdu_cnt);
  6025. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
  6026. pdev->stats.tx.non_amsdu_cnt);
  6027. }
  6028. /**
6029. * dp_print_peer_stats(): print peer stats
6030. * @peer: DP_PEER handle
6031. *
6032. * Return: void
  6033. */
  6034. static inline void dp_print_peer_stats(struct dp_peer *peer)
  6035. {
  6036. uint8_t i;
  6037. uint32_t index;
  6038. uint32_t j;
  6039. char nss[DP_NSS_LENGTH];
  6040. char mu_group_id[DP_MU_GROUP_LENGTH];
  6041. DP_PRINT_STATS("Node Tx Stats:\n");
  6042. DP_PRINT_STATS("Total Packet Completions = %d",
  6043. peer->stats.tx.comp_pkt.num);
  6044. DP_PRINT_STATS("Total Bytes Completions = %llu",
  6045. peer->stats.tx.comp_pkt.bytes);
  6046. DP_PRINT_STATS("Success Packets = %d",
  6047. peer->stats.tx.tx_success.num);
  6048. DP_PRINT_STATS("Success Bytes = %llu",
  6049. peer->stats.tx.tx_success.bytes);
  6050. DP_PRINT_STATS("Unicast Success Packets = %d",
  6051. peer->stats.tx.ucast.num);
  6052. DP_PRINT_STATS("Unicast Success Bytes = %llu",
  6053. peer->stats.tx.ucast.bytes);
  6054. DP_PRINT_STATS("Multicast Success Packets = %d",
  6055. peer->stats.tx.mcast.num);
  6056. DP_PRINT_STATS("Multicast Success Bytes = %llu",
  6057. peer->stats.tx.mcast.bytes);
  6058. DP_PRINT_STATS("Broadcast Success Packets = %d",
  6059. peer->stats.tx.bcast.num);
  6060. DP_PRINT_STATS("Broadcast Success Bytes = %llu",
  6061. peer->stats.tx.bcast.bytes);
  6062. DP_PRINT_STATS("Packets Failed = %d",
  6063. peer->stats.tx.tx_failed);
  6064. DP_PRINT_STATS("Packets In OFDMA = %d",
  6065. peer->stats.tx.ofdma);
  6066. DP_PRINT_STATS("Packets In STBC = %d",
  6067. peer->stats.tx.stbc);
  6068. DP_PRINT_STATS("Packets In LDPC = %d",
  6069. peer->stats.tx.ldpc);
  6070. DP_PRINT_STATS("Packet Retries = %d",
  6071. peer->stats.tx.retries);
  6072. DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
  6073. peer->stats.tx.amsdu_cnt);
  6074. DP_PRINT_STATS("Last Packet RSSI = %d",
  6075. peer->stats.tx.last_ack_rssi);
  6076. DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
  6077. peer->stats.tx.dropped.fw_rem.num);
  6078. DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
  6079. peer->stats.tx.dropped.fw_rem.bytes);
  6080. DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
  6081. peer->stats.tx.dropped.fw_rem_tx);
  6082. DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
  6083. peer->stats.tx.dropped.fw_rem_notx);
  6084. DP_PRINT_STATS("Dropped : Age Out = %d",
  6085. peer->stats.tx.dropped.age_out);
  6086. DP_PRINT_STATS("NAWDS : ");
  6087. DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
  6088. peer->stats.tx.nawds_mcast_drop);
  6089. DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
  6090. peer->stats.tx.nawds_mcast.num);
  6091. DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
  6092. peer->stats.tx.nawds_mcast.bytes);
  6093. DP_PRINT_STATS("Rate Info:");
  6094. dp_print_common_rates_info(peer->stats.tx.pkt_type);
  6095. DP_PRINT_STATS("SGI = "
  6096. " 0.8us %d"
  6097. " 0.4us %d"
  6098. " 1.6us %d"
  6099. " 3.2us %d",
  6100. peer->stats.tx.sgi_count[0],
  6101. peer->stats.tx.sgi_count[1],
  6102. peer->stats.tx.sgi_count[2],
  6103. peer->stats.tx.sgi_count[3]);
  6104. DP_PRINT_STATS("Excess Retries per AC ");
  6105. DP_PRINT_STATS(" Best effort = %d",
  6106. peer->stats.tx.excess_retries_per_ac[0]);
  6107. DP_PRINT_STATS(" Background= %d",
  6108. peer->stats.tx.excess_retries_per_ac[1]);
  6109. DP_PRINT_STATS(" Video = %d",
  6110. peer->stats.tx.excess_retries_per_ac[2]);
  6111. DP_PRINT_STATS(" Voice = %d",
  6112. peer->stats.tx.excess_retries_per_ac[3]);
  6113. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
  6114. peer->stats.tx.bw[0], peer->stats.tx.bw[1],
  6115. peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
  6116. index = 0;
  6117. for (i = 0; i < SS_COUNT; i++) {
  6118. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6119. " %d", peer->stats.tx.nss[i]);
  6120. }
  6121. DP_PRINT_STATS("NSS(1-8) = %s", nss);
  6122. DP_PRINT_STATS("Transmit Type :");
  6123. DP_PRINT_STATS("SU %d, MU_MIMO %d, MU_OFDMA %d, MU_MIMO_OFDMA %d",
  6124. peer->stats.tx.transmit_type[0],
  6125. peer->stats.tx.transmit_type[1],
  6126. peer->stats.tx.transmit_type[2],
  6127. peer->stats.tx.transmit_type[3]);
  6128. for (i = 0; i < MAX_MU_GROUP_ID;) {
  6129. index = 0;
  6130. for (j = 0; j < DP_MU_GROUP_SHOW && i < MAX_MU_GROUP_ID;
  6131. j++) {
  6132. index += qdf_snprint(&mu_group_id[index],
  6133. DP_MU_GROUP_LENGTH - index,
  6134. " %d",
  6135. peer->stats.tx.mu_group_id[i]);
  6136. i++;
  6137. }
  6138. DP_PRINT_STATS("User position list for GID %02d->%d: [%s]",
  6139. i - DP_MU_GROUP_SHOW, i - 1, mu_group_id);
  6140. }
  6141. DP_PRINT_STATS("Last Packet RU index [%d], Size [%d]",
  6142. peer->stats.tx.ru_start, peer->stats.tx.ru_tones);
  6143. DP_PRINT_STATS("RU Locations RU[26 52 106 242 484 996]:");
  6144. DP_PRINT_STATS("RU_26: %d", peer->stats.tx.ru_loc[0]);
  6145. DP_PRINT_STATS("RU 52: %d", peer->stats.tx.ru_loc[1]);
  6146. DP_PRINT_STATS("RU 106: %d", peer->stats.tx.ru_loc[2]);
  6147. DP_PRINT_STATS("RU 242: %d", peer->stats.tx.ru_loc[3]);
  6148. DP_PRINT_STATS("RU 484: %d", peer->stats.tx.ru_loc[4]);
  6149. DP_PRINT_STATS("RU 996: %d", peer->stats.tx.ru_loc[5]);
  6150. DP_PRINT_STATS("Aggregation:");
  6151. DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
  6152. peer->stats.tx.amsdu_cnt);
  6153. DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
  6154. peer->stats.tx.non_amsdu_cnt);
  6155. DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
  6156. DP_PRINT_STATS(" Bytes transmitted in last sec: %d",
  6157. peer->stats.tx.tx_byte_rate);
  6158. DP_PRINT_STATS(" Data transmitted in last sec: %d",
  6159. peer->stats.tx.tx_data_rate);
  6160. DP_PRINT_STATS("Node Rx Stats:");
  6161. DP_PRINT_STATS("Packets Sent To Stack = %d",
  6162. peer->stats.rx.to_stack.num);
  6163. DP_PRINT_STATS("Bytes Sent To Stack = %llu",
  6164. peer->stats.rx.to_stack.bytes);
  6165. for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
  6166. DP_PRINT_STATS("Ring Id = %d", i);
  6167. DP_PRINT_STATS(" Packets Received = %d",
  6168. peer->stats.rx.rcvd_reo[i].num);
  6169. DP_PRINT_STATS(" Bytes Received = %llu",
  6170. peer->stats.rx.rcvd_reo[i].bytes);
  6171. }
  6172. DP_PRINT_STATS("Multicast Packets Received = %d",
  6173. peer->stats.rx.multicast.num);
  6174. DP_PRINT_STATS("Multicast Bytes Received = %llu",
  6175. peer->stats.rx.multicast.bytes);
  6176. DP_PRINT_STATS("Broadcast Packets Received = %d",
  6177. peer->stats.rx.bcast.num);
  6178. DP_PRINT_STATS("Broadcast Bytes Received = %llu",
  6179. peer->stats.rx.bcast.bytes);
  6180. DP_PRINT_STATS("Intra BSS Packets Received = %d",
  6181. peer->stats.rx.intra_bss.pkts.num);
  6182. DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
  6183. peer->stats.rx.intra_bss.pkts.bytes);
  6184. DP_PRINT_STATS("Raw Packets Received = %d",
  6185. peer->stats.rx.raw.num);
  6186. DP_PRINT_STATS("Raw Bytes Received = %llu",
  6187. peer->stats.rx.raw.bytes);
  6188. DP_PRINT_STATS("Errors: MIC Errors = %d",
  6189. peer->stats.rx.err.mic_err);
  6190. DP_PRINT_STATS("Erros: Decryption Errors = %d",
  6191. peer->stats.rx.err.decrypt_err);
  6192. DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
  6193. peer->stats.rx.non_ampdu_cnt);
  6194. DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
  6195. peer->stats.rx.ampdu_cnt);
  6196. DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
  6197. peer->stats.rx.non_amsdu_cnt);
  6198. DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
  6199. peer->stats.rx.amsdu_cnt);
  6200. DP_PRINT_STATS("NAWDS : ");
  6201. DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
  6202. peer->stats.rx.nawds_mcast_drop);
  6203. DP_PRINT_STATS("SGI ="
  6204. " 0.8us %d"
  6205. " 0.4us %d"
  6206. " 1.6us %d"
  6207. " 3.2us %d",
  6208. peer->stats.rx.sgi_count[0],
  6209. peer->stats.rx.sgi_count[1],
  6210. peer->stats.rx.sgi_count[2],
  6211. peer->stats.rx.sgi_count[3]);
  6212. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
  6213. peer->stats.rx.bw[0], peer->stats.rx.bw[1],
  6214. peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
  6215. DP_PRINT_STATS("Reception Type ="
  6216. " SU %d,"
  6217. " MU_MIMO %d,"
  6218. " MU_OFDMA %d,"
  6219. " MU_OFDMA_MIMO %d",
  6220. peer->stats.rx.reception_type[0],
  6221. peer->stats.rx.reception_type[1],
  6222. peer->stats.rx.reception_type[2],
  6223. peer->stats.rx.reception_type[3]);
  6224. dp_print_common_rates_info(peer->stats.rx.pkt_type);
  6225. index = 0;
  6226. for (i = 0; i < SS_COUNT; i++) {
  6227. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6228. " %d", peer->stats.rx.nss[i]);
  6229. }
  6230. DP_PRINT_STATS("NSS(1-8) = %s",
  6231. nss);
  6232. DP_PRINT_STATS("Aggregation:");
  6233. DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
  6234. peer->stats.rx.ampdu_cnt);
  6235. DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
  6236. peer->stats.rx.non_ampdu_cnt);
  6237. DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
  6238. peer->stats.rx.amsdu_cnt);
  6239. DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
  6240. peer->stats.rx.non_amsdu_cnt);
  6241. DP_PRINT_STATS("Bytes and Packets received in last one sec:");
  6242. DP_PRINT_STATS(" Bytes received in last sec: %d",
  6243. peer->stats.rx.rx_byte_rate);
  6244. DP_PRINT_STATS(" Data received in last sec: %d",
  6245. peer->stats.rx.rx_data_rate);
  6246. }
  6247. /*
  6248. * dp_get_host_peer_stats()- function to print peer stats
  6249. * @pdev_handle: DP_PDEV handle
  6250. * @mac_addr: mac address of the peer
  6251. *
  6252. * Return: void
  6253. */
  6254. static void
  6255. dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
  6256. {
  6257. struct dp_peer *peer;
  6258. uint8_t local_id;
  6259. if (!mac_addr) {
  6260. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6261. "Invalid MAC address\n");
  6262. return;
  6263. }
  6264. peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
  6265. &local_id);
  6266. if (!peer) {
  6267. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6268. "%s: Invalid peer\n", __func__);
  6269. return;
  6270. }
  6271. dp_print_peer_stats(peer);
  6272. dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
  6273. }
  6274. /**
  6275. * dp_print_soc_cfg_params()- Dump soc wlan config parameters
  6276. * @soc_handle: Soc handle
  6277. *
  6278. * Return: void
  6279. */
  6280. static void
  6281. dp_print_soc_cfg_params(struct dp_soc *soc)
  6282. {
  6283. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  6284. uint8_t index = 0, i = 0;
  6285. char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
  6286. int num_of_int_contexts;
  6287. if (!soc) {
  6288. dp_err("Context is null");
  6289. return;
  6290. }
  6291. soc_cfg_ctx = soc->wlan_cfg_ctx;
  6292. if (!soc_cfg_ctx) {
  6293. dp_err("Context is null");
  6294. return;
  6295. }
  6296. num_of_int_contexts =
  6297. wlan_cfg_get_num_contexts(soc_cfg_ctx);
  6298. DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
  6299. soc_cfg_ctx->num_int_ctxts);
  6300. DP_TRACE_STATS(DEBUG, "Max clients: %u",
  6301. soc_cfg_ctx->max_clients);
  6302. DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
  6303. soc_cfg_ctx->max_alloc_size);
  6304. DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
  6305. soc_cfg_ctx->per_pdev_tx_ring);
  6306. DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
  6307. soc_cfg_ctx->num_tcl_data_rings);
  6308. DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
  6309. soc_cfg_ctx->per_pdev_rx_ring);
  6310. DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
  6311. soc_cfg_ctx->per_pdev_lmac_ring);
  6312. DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
  6313. soc_cfg_ctx->num_reo_dest_rings);
  6314. DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
  6315. soc_cfg_ctx->num_tx_desc_pool);
  6316. DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
  6317. soc_cfg_ctx->num_tx_ext_desc_pool);
  6318. DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
  6319. soc_cfg_ctx->num_tx_desc);
  6320. DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
  6321. soc_cfg_ctx->num_tx_ext_desc);
  6322. DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
  6323. soc_cfg_ctx->htt_packet_type);
  6324. DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
  6325. soc_cfg_ctx->max_peer_id);
  6326. DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
  6327. soc_cfg_ctx->tx_ring_size);
  6328. DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
  6329. soc_cfg_ctx->tx_comp_ring_size);
  6330. DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
  6331. soc_cfg_ctx->tx_comp_ring_size_nss);
  6332. DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
  6333. soc_cfg_ctx->int_batch_threshold_tx);
  6334. DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
  6335. soc_cfg_ctx->int_timer_threshold_tx);
  6336. DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
  6337. soc_cfg_ctx->int_batch_threshold_rx);
  6338. DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
  6339. soc_cfg_ctx->int_timer_threshold_rx);
  6340. DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
  6341. soc_cfg_ctx->int_batch_threshold_other);
  6342. DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
  6343. soc_cfg_ctx->int_timer_threshold_other);
  6344. for (i = 0; i < num_of_int_contexts; i++) {
  6345. index += qdf_snprint(&ring_mask[index],
  6346. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6347. " %d",
  6348. soc_cfg_ctx->int_tx_ring_mask[i]);
  6349. }
  6350. DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
  6351. num_of_int_contexts, ring_mask);
  6352. index = 0;
  6353. for (i = 0; i < num_of_int_contexts; i++) {
  6354. index += qdf_snprint(&ring_mask[index],
  6355. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6356. " %d",
  6357. soc_cfg_ctx->int_rx_ring_mask[i]);
  6358. }
  6359. DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
  6360. num_of_int_contexts, ring_mask);
  6361. index = 0;
  6362. for (i = 0; i < num_of_int_contexts; i++) {
  6363. index += qdf_snprint(&ring_mask[index],
  6364. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6365. " %d",
  6366. soc_cfg_ctx->int_rx_mon_ring_mask[i]);
  6367. }
  6368. DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
  6369. num_of_int_contexts, ring_mask);
  6370. index = 0;
  6371. for (i = 0; i < num_of_int_contexts; i++) {
  6372. index += qdf_snprint(&ring_mask[index],
  6373. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6374. " %d",
  6375. soc_cfg_ctx->int_rx_err_ring_mask[i]);
  6376. }
  6377. DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
  6378. num_of_int_contexts, ring_mask);
  6379. index = 0;
  6380. for (i = 0; i < num_of_int_contexts; i++) {
  6381. index += qdf_snprint(&ring_mask[index],
  6382. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6383. " %d",
  6384. soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
  6385. }
  6386. DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
  6387. num_of_int_contexts, ring_mask);
  6388. index = 0;
  6389. for (i = 0; i < num_of_int_contexts; i++) {
  6390. index += qdf_snprint(&ring_mask[index],
  6391. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6392. " %d",
  6393. soc_cfg_ctx->int_reo_status_ring_mask[i]);
  6394. }
  6395. DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
  6396. num_of_int_contexts, ring_mask);
  6397. index = 0;
  6398. for (i = 0; i < num_of_int_contexts; i++) {
  6399. index += qdf_snprint(&ring_mask[index],
  6400. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6401. " %d",
  6402. soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
  6403. }
  6404. DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
  6405. num_of_int_contexts, ring_mask);
  6406. index = 0;
  6407. for (i = 0; i < num_of_int_contexts; i++) {
  6408. index += qdf_snprint(&ring_mask[index],
  6409. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6410. " %d",
  6411. soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
  6412. }
  6413. DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
  6414. num_of_int_contexts, ring_mask);
  6415. DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
  6416. soc_cfg_ctx->rx_hash);
  6417. DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
  6418. soc_cfg_ctx->tso_enabled);
  6419. DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
  6420. soc_cfg_ctx->lro_enabled);
  6421. DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
  6422. soc_cfg_ctx->sg_enabled);
  6423. DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
  6424. soc_cfg_ctx->gro_enabled);
  6425. DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
  6426. soc_cfg_ctx->rawmode_enabled);
  6427. DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
  6428. soc_cfg_ctx->peer_flow_ctrl_enabled);
  6429. DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
  6430. soc_cfg_ctx->napi_enabled);
  6431. DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
  6432. soc_cfg_ctx->tcp_udp_checksumoffload);
  6433. DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
  6434. soc_cfg_ctx->defrag_timeout_check);
  6435. DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
  6436. soc_cfg_ctx->rx_defrag_min_timeout);
  6437. DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
  6438. soc_cfg_ctx->wbm_release_ring);
  6439. DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
  6440. soc_cfg_ctx->tcl_cmd_ring);
  6441. DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
  6442. soc_cfg_ctx->tcl_status_ring);
  6443. DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
  6444. soc_cfg_ctx->reo_reinject_ring);
  6445. DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
  6446. soc_cfg_ctx->rx_release_ring);
  6447. DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
  6448. soc_cfg_ctx->reo_exception_ring);
  6449. DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
  6450. soc_cfg_ctx->reo_cmd_ring);
  6451. DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
  6452. soc_cfg_ctx->reo_status_ring);
  6453. DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
  6454. soc_cfg_ctx->rxdma_refill_ring);
  6455. DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
  6456. soc_cfg_ctx->rxdma_err_dst_ring);
  6457. }
  6458. /**
6459. * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
6460. * @pdev: DP pdev handle
6461. *
6462. * Return: void
  6463. */
  6464. static void
  6465. dp_print_pdev_cfg_params(struct dp_pdev *pdev)
  6466. {
  6467. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  6468. if (!pdev) {
  6469. dp_err("Context is null");
  6470. return;
  6471. }
  6472. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  6473. if (!pdev_cfg_ctx) {
  6474. dp_err("Context is null");
  6475. return;
  6476. }
  6477. DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
  6478. pdev_cfg_ctx->rx_dma_buf_ring_size);
  6479. DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
  6480. pdev_cfg_ctx->dma_mon_buf_ring_size);
  6481. DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
  6482. pdev_cfg_ctx->dma_mon_dest_ring_size);
  6483. DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
  6484. pdev_cfg_ctx->dma_mon_status_ring_size);
  6485. DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
  6486. pdev_cfg_ctx->rxdma_monitor_desc_ring);
  6487. DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
  6488. pdev_cfg_ctx->num_mac_rings);
  6489. }
  6490. /**
  6491. * dp_txrx_stats_help() - Helper function for Txrx_Stats
  6492. *
  6493. * Return: None
  6494. */
  6495. static void dp_txrx_stats_help(void)
  6496. {
  6497. dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
  6498. dp_info("stats_option:");
  6499. dp_info(" 1 -- HTT Tx Statistics");
  6500. dp_info(" 2 -- HTT Rx Statistics");
  6501. dp_info(" 3 -- HTT Tx HW Queue Statistics");
  6502. dp_info(" 4 -- HTT Tx HW Sched Statistics");
  6503. dp_info(" 5 -- HTT Error Statistics");
  6504. dp_info(" 6 -- HTT TQM Statistics");
  6505. dp_info(" 7 -- HTT TQM CMDQ Statistics");
  6506. dp_info(" 8 -- HTT TX_DE_CMN Statistics");
  6507. dp_info(" 9 -- HTT Tx Rate Statistics");
  6508. dp_info(" 10 -- HTT Rx Rate Statistics");
  6509. dp_info(" 11 -- HTT Peer Statistics");
  6510. dp_info(" 12 -- HTT Tx SelfGen Statistics");
  6511. dp_info(" 13 -- HTT Tx MU HWQ Statistics");
  6512. dp_info(" 14 -- HTT RING_IF_INFO Statistics");
  6513. dp_info(" 15 -- HTT SRNG Statistics");
  6514. dp_info(" 16 -- HTT SFM Info Statistics");
  6515. dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
  6516. dp_info(" 18 -- HTT Peer List Details");
  6517. dp_info(" 20 -- Clear Host Statistics");
  6518. dp_info(" 21 -- Host Rx Rate Statistics");
  6519. dp_info(" 22 -- Host Tx Rate Statistics");
  6520. dp_info(" 23 -- Host Tx Statistics");
  6521. dp_info(" 24 -- Host Rx Statistics");
  6522. dp_info(" 25 -- Host AST Statistics");
  6523. dp_info(" 26 -- Host SRNG PTR Statistics");
  6524. dp_info(" 27 -- Host Mon Statistics");
  6525. dp_info(" 28 -- Host REO Queue Statistics");
  6526. dp_info(" 29 -- Host Soc cfg param Statistics");
  6527. dp_info(" 30 -- Host pdev cfg param Statistics");
  6528. }
/**
 * dp_print_host_stats() - Function to print the stats aggregated at host
 * @vdev_handle: DP_VDEV handle
 * @req: host stats request containing the stats type
 *
 * Return: 0 on success, prints an error message on failure
 */
  6536. static int
  6537. dp_print_host_stats(struct cdp_vdev *vdev_handle,
  6538. struct cdp_txrx_stats_req *req)
  6539. {
  6540. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6541. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  6542. enum cdp_host_txrx_stats type =
  6543. dp_stats_mapping_table[req->stats][STATS_HOST];
  6544. dp_aggregate_pdev_stats(pdev);
  6545. switch (type) {
  6546. case TXRX_CLEAR_STATS:
  6547. dp_txrx_host_stats_clr(vdev);
  6548. break;
  6549. case TXRX_RX_RATE_STATS:
  6550. dp_print_rx_rates(vdev);
  6551. break;
  6552. case TXRX_TX_RATE_STATS:
  6553. dp_print_tx_rates(vdev);
  6554. break;
  6555. case TXRX_TX_HOST_STATS:
  6556. dp_print_pdev_tx_stats(pdev);
  6557. dp_print_soc_tx_stats(pdev->soc);
  6558. break;
  6559. case TXRX_RX_HOST_STATS:
  6560. dp_print_pdev_rx_stats(pdev);
  6561. dp_print_soc_rx_stats(pdev->soc);
  6562. break;
  6563. case TXRX_AST_STATS:
  6564. dp_print_ast_stats(pdev->soc);
  6565. dp_print_peer_table(vdev);
  6566. break;
  6567. case TXRX_SRNG_PTR_STATS:
  6568. dp_print_ring_stats(pdev);
  6569. break;
  6570. case TXRX_RX_MON_STATS:
  6571. dp_print_pdev_rx_mon_stats(pdev);
  6572. break;
  6573. case TXRX_REO_QUEUE_STATS:
  6574. dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
  6575. break;
  6576. case TXRX_SOC_CFG_PARAMS:
  6577. dp_print_soc_cfg_params(pdev->soc);
  6578. break;
  6579. case TXRX_PDEV_CFG_PARAMS:
  6580. dp_print_pdev_cfg_params(pdev);
  6581. break;
  6582. default:
  6583. dp_info("Wrong Input For TxRx Host Stats");
  6584. dp_txrx_stats_help();
  6585. break;
  6586. }
  6587. return 0;
  6588. }
  6589. /*
  6590. * dp_ppdu_ring_reset()- Reset PPDU Stats ring
  6591. * @pdev: DP_PDEV handle
  6592. *
  6593. * Return: void
  6594. */
  6595. static void
  6596. dp_ppdu_ring_reset(struct dp_pdev *pdev)
  6597. {
  6598. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  6599. int mac_id;
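/* Reconfigure every monitor status ring with an all-zero TLV filter so
 * that no further PPDU status TLVs are delivered to the host.
 */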
  6600. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  6601. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6602. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6603. pdev->pdev_id);
  6604. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6605. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6606. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6607. }
  6608. }
  6609. /*
  6610. * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
  6611. * @pdev: DP_PDEV handle
  6612. *
  6613. * Return: void
  6614. */
  6615. static void
  6616. dp_ppdu_ring_cfg(struct dp_pdev *pdev)
  6617. {
  6618. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  6619. int mac_id;
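/* Select only the TLVs needed for PPDU stats (mpdu_start plus the
 * ppdu_start/ppdu_end family) on the monitor status ring; packet payload
 * TLVs remain disabled unless NAC monitoring or M-copy mode below also
 * needs the packet header.
 */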
  6620. htt_tlv_filter.mpdu_start = 1;
  6621. htt_tlv_filter.msdu_start = 0;
  6622. htt_tlv_filter.packet = 0;
  6623. htt_tlv_filter.msdu_end = 0;
  6624. htt_tlv_filter.mpdu_end = 0;
  6625. htt_tlv_filter.attention = 0;
  6626. htt_tlv_filter.ppdu_start = 1;
  6627. htt_tlv_filter.ppdu_end = 1;
  6628. htt_tlv_filter.ppdu_end_user_stats = 1;
  6629. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  6630. htt_tlv_filter.ppdu_end_status_done = 1;
  6631. htt_tlv_filter.enable_fp = 1;
  6632. htt_tlv_filter.enable_md = 0;
  6633. if (pdev->neighbour_peers_added &&
  6634. pdev->soc->hw_nac_monitor_support) {
  6635. htt_tlv_filter.enable_md = 1;
  6636. htt_tlv_filter.packet_header = 1;
  6637. }
  6638. if (pdev->mcopy_mode) {
  6639. htt_tlv_filter.packet_header = 1;
  6640. htt_tlv_filter.enable_mo = 1;
  6641. }
  6642. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  6643. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  6644. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  6645. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  6646. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  6647. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  6648. if (pdev->neighbour_peers_added &&
  6649. pdev->soc->hw_nac_monitor_support)
  6650. htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
  6651. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6652. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6653. pdev->pdev_id);
  6654. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6655. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6656. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6657. }
  6658. }
/*
 * is_ppdu_txrx_capture_enabled() - check whether any PPDU capture consumer
 * (pktlog PPDU stats, tx sniffer or M-copy mode) is active.
 * @pdev: dp pdev handle.
 *
 * Return: true if none of these modes is enabled, false otherwise.
 */
  6666. static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
  6667. {
  6668. if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
  6669. !pdev->mcopy_mode)
  6670. return true;
  6671. else
  6672. return false;
  6673. }
/*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev_handle: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
 */
  6681. static QDF_STATUS
  6682. dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
  6683. {
  6684. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
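/* The PPDU stats configuration pushed to FW depends on which other
 * consumers (pktlog, enhanced stats, tx sniffer, M-copy) are active, so
 * each enable/disable transition below picks the matching
 * DP_PPDU_STATS_CFG_* bitmap.
 */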
  6685. switch (val) {
  6686. case CDP_BPR_DISABLE:
  6687. pdev->bpr_enable = CDP_BPR_DISABLE;
  6688. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6689. !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
  6690. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6691. } else if (pdev->enhanced_stats_en &&
  6692. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6693. !pdev->pktlog_ppdu_stats) {
  6694. dp_h2t_cfg_stats_msg_send(pdev,
  6695. DP_PPDU_STATS_CFG_ENH_STATS,
  6696. pdev->pdev_id);
  6697. }
  6698. break;
  6699. case CDP_BPR_ENABLE:
  6700. pdev->bpr_enable = CDP_BPR_ENABLE;
  6701. if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
  6702. !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
  6703. dp_h2t_cfg_stats_msg_send(pdev,
  6704. DP_PPDU_STATS_CFG_BPR,
  6705. pdev->pdev_id);
  6706. } else if (pdev->enhanced_stats_en &&
  6707. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6708. !pdev->pktlog_ppdu_stats) {
  6709. dp_h2t_cfg_stats_msg_send(pdev,
  6710. DP_PPDU_STATS_CFG_BPR_ENH,
  6711. pdev->pdev_id);
  6712. } else if (pdev->pktlog_ppdu_stats) {
  6713. dp_h2t_cfg_stats_msg_send(pdev,
  6714. DP_PPDU_STATS_CFG_BPR_PKTLOG,
  6715. pdev->pdev_id);
  6716. }
  6717. break;
  6718. default:
  6719. break;
  6720. }
  6721. return QDF_STATUS_SUCCESS;
  6722. }
  6723. /*
  6724. * dp_config_debug_sniffer()- API to enable/disable debug sniffer
  6725. * @pdev_handle: DP_PDEV handle
  6726. * @val: user provided value
  6727. *
  6728. * Return: 0 for success. nonzero for failure.
  6729. */
  6730. static QDF_STATUS
  6731. dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
  6732. {
  6733. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6734. QDF_STATUS status = QDF_STATUS_SUCCESS;
  6735. if (pdev->mcopy_mode)
  6736. dp_reset_monitor_mode(pdev_handle);
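/* val: 0 - disable tx sniffer and M-copy, 1 - enable tx sniffer,
 * 2 - enable M-copy mode (uses the monitor rings and is mutually
 * exclusive with a monitor vdev).
 */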
  6737. switch (val) {
  6738. case 0:
  6739. pdev->tx_sniffer_enable = 0;
  6740. pdev->mcopy_mode = 0;
  6741. pdev->monitor_configured = false;
  6742. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6743. !pdev->bpr_enable) {
  6744. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6745. dp_ppdu_ring_reset(pdev);
  6746. } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
  6747. dp_h2t_cfg_stats_msg_send(pdev,
  6748. DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6749. } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
  6750. dp_h2t_cfg_stats_msg_send(pdev,
  6751. DP_PPDU_STATS_CFG_BPR_ENH,
  6752. pdev->pdev_id);
  6753. } else {
  6754. dp_h2t_cfg_stats_msg_send(pdev,
  6755. DP_PPDU_STATS_CFG_BPR,
  6756. pdev->pdev_id);
  6757. }
  6758. break;
  6759. case 1:
  6760. pdev->tx_sniffer_enable = 1;
  6761. pdev->mcopy_mode = 0;
  6762. pdev->monitor_configured = false;
  6763. if (!pdev->pktlog_ppdu_stats)
  6764. dp_h2t_cfg_stats_msg_send(pdev,
  6765. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6766. break;
  6767. case 2:
  6768. if (pdev->monitor_vdev) {
  6769. status = QDF_STATUS_E_RESOURCES;
  6770. break;
  6771. }
  6772. pdev->mcopy_mode = 1;
  6773. dp_pdev_configure_monitor_rings(pdev);
  6774. pdev->monitor_configured = true;
  6775. pdev->tx_sniffer_enable = 0;
  6776. if (!pdev->pktlog_ppdu_stats)
  6777. dp_h2t_cfg_stats_msg_send(pdev,
  6778. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6779. break;
  6780. default:
  6781. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6782. "Invalid value");
  6783. break;
  6784. }
  6785. return status;
  6786. }
/*
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
 * @pdev_handle: DP_PDEV handle
 *
 * Return: void
 */
  6793. static void
  6794. dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6795. {
  6796. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
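/* Start the cal client timer only on the 0 -> 1 transition, and claim the
 * PPDU status ring unless M-copy, NAC monitoring or a monitor vdev is
 * already using it.
 */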
  6797. if (pdev->enhanced_stats_en == 0)
  6798. dp_cal_client_timer_start(pdev->cal_client_ctx);
  6799. pdev->enhanced_stats_en = 1;
  6800. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6801. !pdev->monitor_vdev)
  6802. dp_ppdu_ring_cfg(pdev);
  6803. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6804. dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6805. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6806. dp_h2t_cfg_stats_msg_send(pdev,
  6807. DP_PPDU_STATS_CFG_BPR_ENH,
  6808. pdev->pdev_id);
  6809. }
  6810. }
/*
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
 * @pdev_handle: DP_PDEV handle
 *
 * Return: void
 */
  6817. static void
  6818. dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6819. {
  6820. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6821. if (pdev->enhanced_stats_en == 1)
  6822. dp_cal_client_timer_stop(pdev->cal_client_ctx);
  6823. pdev->enhanced_stats_en = 0;
  6824. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6825. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6826. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6827. dp_h2t_cfg_stats_msg_send(pdev,
  6828. DP_PPDU_STATS_CFG_BPR,
  6829. pdev->pdev_id);
  6830. }
  6831. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6832. !pdev->monitor_vdev)
  6833. dp_ppdu_ring_reset(pdev);
  6834. }
/*
 * dp_get_fw_peer_stats() - function to print peer stats
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 * @cap: type of HTT stats requested:
 *       1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
 *       2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
 *       3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
 * @is_wait: if set, wait on completion from firmware response
 *
 * Currently supporting MAC-ID-based requests only.
 *
 * Return: void
 */
  6849. static void
  6850. dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
  6851. uint32_t cap, uint32_t is_wait)
  6852. {
  6853. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6854. int i;
  6855. uint32_t config_param0 = 0;
  6856. uint32_t config_param1 = 0;
  6857. uint32_t config_param2 = 0;
  6858. uint32_t config_param3 = 0;
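/* Build the HTT peer-info request: flag that a MAC address is present and
 * select the request mode (cap) in config_param0, request every peer-stats
 * TLV via config_param1, and pack the peer MAC address byte-wise into
 * config_param2/config_param3.
 */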
  6859. HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
  6860. config_param0 |= (1 << (cap + 1));
  6861. for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
  6862. config_param1 |= (1 << i);
  6863. }
  6864. config_param2 |= (mac_addr[0] & 0x000000ff);
  6865. config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
  6866. config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
  6867. config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
  6868. config_param3 |= (mac_addr[4] & 0x000000ff);
  6869. config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
  6870. if (is_wait) {
  6871. qdf_event_reset(&pdev->fw_peer_stats_event);
  6872. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6873. config_param0, config_param1,
  6874. config_param2, config_param3,
  6875. 0, 1, 0);
  6876. qdf_wait_single_event(&pdev->fw_peer_stats_event,
  6877. DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
  6878. } else {
  6879. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6880. config_param0, config_param1,
  6881. config_param2, config_param3,
  6882. 0, 0, 0);
  6883. }
  6884. }
/* This struct definition will be removed from here
 * once it gets added to the FW headers.
 */
  6887. struct httstats_cmd_req {
  6888. uint32_t config_param0;
  6889. uint32_t config_param1;
  6890. uint32_t config_param2;
  6891. uint32_t config_param3;
  6892. int cookie;
  6893. u_int8_t stats_id;
  6894. };
/*
 * dp_get_htt_stats: function to process the httstats request
 * @pdev_handle: DP pdev handle
 * @data: pointer to request data
 * @data_len: length of the request data
 *
 * Return: void
 */
  6903. static void
  6904. dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
  6905. {
  6906. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6907. struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
  6908. QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
  6909. dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
  6910. req->config_param0, req->config_param1,
  6911. req->config_param2, req->config_param3,
  6912. req->cookie, 0, 0);
  6913. }
  6914. /*
  6915. * dp_set_pdev_param: function to set parameters in pdev
  6916. * @pdev_handle: DP pdev handle
  6917. * @param: parameter type to be set
  6918. * @val: value of parameter to be set
  6919. *
  6920. * Return: 0 for success. nonzero for failure.
  6921. */
  6922. static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
  6923. enum cdp_pdev_param_type param,
  6924. uint8_t val)
  6925. {
  6926. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6927. switch (param) {
  6928. case CDP_CONFIG_DEBUG_SNIFFER:
  6929. return dp_config_debug_sniffer(pdev_handle, val);
  6930. case CDP_CONFIG_BPR_ENABLE:
  6931. return dp_set_bpr_enable(pdev_handle, val);
  6932. case CDP_CONFIG_PRIMARY_RADIO:
  6933. pdev->is_primary = val;
  6934. break;
  6935. default:
  6936. return QDF_STATUS_E_INVAL;
  6937. }
  6938. return QDF_STATUS_SUCCESS;
  6939. }
/*
 * dp_get_vdev_param: function to get parameters from vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type whose value is requested
 *
 * Return: parameter value
 */
  6946. static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
  6947. enum cdp_vdev_param_type param)
  6948. {
  6949. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6950. uint32_t val;
  6951. switch (param) {
  6952. case CDP_ENABLE_WDS:
  6953. val = vdev->wds_enabled;
  6954. break;
  6955. case CDP_ENABLE_MEC:
  6956. val = vdev->mec_enabled;
  6957. break;
  6958. case CDP_ENABLE_DA_WAR:
  6959. val = vdev->pdev->soc->da_war_enabled;
  6960. break;
  6961. default:
  6962. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6963. "param value %d is wrong\n",
  6964. param);
  6965. val = -1;
  6966. break;
  6967. }
  6968. return val;
  6969. }
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: void
 */
  6977. static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
  6978. enum cdp_vdev_param_type param, uint32_t val)
  6979. {
  6980. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6981. switch (param) {
  6982. case CDP_ENABLE_WDS:
  6983. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6984. "wds_enable %d for vdev(%p) id(%d)\n",
  6985. val, vdev, vdev->vdev_id);
  6986. vdev->wds_enabled = val;
  6987. break;
  6988. case CDP_ENABLE_MEC:
  6989. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6990. "mec_enable %d for vdev(%p) id(%d)\n",
  6991. val, vdev, vdev->vdev_id);
  6992. vdev->mec_enabled = val;
  6993. break;
  6994. case CDP_ENABLE_DA_WAR:
  6995. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6996. "da_war_enable %d for vdev(%p) id(%d)\n",
  6997. val, vdev, vdev->vdev_id);
  6998. vdev->pdev->soc->da_war_enabled = val;
  6999. dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
  7000. vdev->pdev->soc));
  7001. break;
  7002. case CDP_ENABLE_NAWDS:
  7003. vdev->nawds_enabled = val;
  7004. break;
  7005. case CDP_ENABLE_MCAST_EN:
  7006. vdev->mcast_enhancement_en = val;
  7007. break;
  7008. case CDP_ENABLE_PROXYSTA:
  7009. vdev->proxysta_vdev = val;
  7010. break;
  7011. case CDP_UPDATE_TDLS_FLAGS:
  7012. vdev->tdls_link_connected = val;
  7013. break;
  7014. case CDP_CFG_WDS_AGING_TIMER:
  7015. if (val == 0)
  7016. qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
  7017. else if (val != vdev->wds_aging_timer_val)
  7018. qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
  7019. vdev->wds_aging_timer_val = val;
  7020. break;
  7021. case CDP_ENABLE_AP_BRIDGE:
  7022. if (wlan_op_mode_sta != vdev->opmode)
  7023. vdev->ap_bridge_enabled = val;
  7024. else
  7025. vdev->ap_bridge_enabled = false;
  7026. break;
  7027. case CDP_ENABLE_CIPHER:
  7028. vdev->sec_type = val;
  7029. break;
  7030. case CDP_ENABLE_QWRAP_ISOLATION:
  7031. vdev->isolation_vdev = val;
  7032. break;
  7033. default:
  7034. break;
  7035. }
  7036. dp_tx_vdev_update_search_flags(vdev);
  7037. }
  7038. /**
  7039. * dp_peer_set_nawds: set nawds bit in peer
  7040. * @peer_handle: pointer to peer
  7041. * @value: enable/disable nawds
  7042. *
  7043. * return: void
  7044. */
  7045. static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
  7046. {
  7047. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7048. peer->nawds_enabled = value;
  7049. }
  7050. /*
  7051. * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
  7052. * @vdev_handle: DP_VDEV handle
  7053. * @map_id:ID of map that needs to be updated
  7054. *
  7055. * Return: void
  7056. */
  7057. static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
  7058. uint8_t map_id)
  7059. {
  7060. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7061. vdev->dscp_tid_map_id = map_id;
  7062. return;
  7063. }
/* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
 * @pdev_handle: DP pdev handle
 *
 * Return: cdp_pdev_stats pointer
 */
  7069. static struct cdp_pdev_stats*
  7070. dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
  7071. {
  7072. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7073. dp_aggregate_pdev_stats(pdev);
  7074. return &pdev->stats;
  7075. }
/* dp_txrx_get_peer_stats - Returns cdp_peer_stats
 * @peer_handle: DP_PEER handle
 *
 * Return: cdp_peer_stats pointer
 */
  7081. static struct cdp_peer_stats*
  7082. dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
  7083. {
  7084. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7085. qdf_assert(peer);
  7086. return &peer->stats;
  7087. }
  7088. /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
  7089. * @peer_handle: DP_PEER handle
  7090. *
  7091. * return : void
  7092. */
  7093. static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
  7094. {
  7095. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7096. qdf_assert(peer);
  7097. qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
  7098. }
/* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
 * @vdev_handle: DP_VDEV handle
 * @buf: buffer for vdev stats
 * @is_aggregate: if true, aggregate the stats across the vdev's peers
 *
 * Return: 0 on success, 1 on an invalid vdev/pdev handle
 */
  7105. static int dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
  7106. bool is_aggregate)
  7107. {
  7108. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7109. struct cdp_vdev_stats *vdev_stats;
  7110. struct dp_pdev *pdev;
  7111. struct dp_soc *soc;
  7112. if (!vdev)
  7113. return 1;
  7114. pdev = vdev->pdev;
  7115. if (!pdev)
  7116. return 1;
  7117. soc = pdev->soc;
  7118. vdev_stats = (struct cdp_vdev_stats *)buf;
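/* An aggregate query walks the vdev's peers under peer_ref_mutex;
 * otherwise only the vdev's own cached stats are copied out.
 */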
  7119. if (is_aggregate) {
  7120. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  7121. dp_aggregate_vdev_stats(vdev, buf);
  7122. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  7123. } else {
  7124. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  7125. }
  7126. return 0;
  7127. }
/*
 * dp_get_total_per(): get total PER (packet error rate)
 * @pdev_handle: DP_PDEV handle
 *
 * Return: % error rate using retries per packet and success packets
 */
  7134. static int dp_get_total_per(struct cdp_pdev *pdev_handle)
  7135. {
  7136. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7137. dp_aggregate_pdev_stats(pdev);
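/* PER (%) = retries * 100 / (successful MSDUs + retries); return 0 when
 * nothing has been transmitted to avoid a divide-by-zero.
 */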
  7138. if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
  7139. return 0;
  7140. return ((pdev->stats.tx.retries * 100) /
  7141. ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
  7142. }
  7143. /*
  7144. * dp_txrx_stats_publish(): publish pdev stats into a buffer
  7145. * @pdev_handle: DP_PDEV handle
  7146. * @buf: to hold pdev_stats
  7147. *
  7148. * Return: int
  7149. */
  7150. static int
  7151. dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
  7152. {
  7153. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7154. struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
  7155. struct cdp_txrx_stats_req req = {0,};
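/* Aggregate the host-side pdev stats, then request the FW PDEV TX and RX
 * stats and give each request a short window to complete before copying
 * the result into the caller's buffer.
 */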
  7156. dp_aggregate_pdev_stats(pdev);
  7157. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
  7158. req.cookie_val = 1;
  7159. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7160. req.param1, req.param2, req.param3, 0,
  7161. req.cookie_val, 0);
  7162. msleep(DP_MAX_SLEEP_TIME);
  7163. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
  7164. req.cookie_val = 1;
  7165. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7166. req.param1, req.param2, req.param3, 0,
  7167. req.cookie_val, 0);
  7168. msleep(DP_MAX_SLEEP_TIME);
  7169. qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
  7170. return TXRX_STATS_LEVEL;
  7171. }
  7172. /**
  7173. * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
  7174. * @pdev: DP_PDEV handle
  7175. * @map_id: ID of map that needs to be updated
  7176. * @tos: index value in map
  7177. * @tid: tid value passed by the user
  7178. *
  7179. * Return: void
  7180. */
  7181. static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
  7182. uint8_t map_id, uint8_t tos, uint8_t tid)
  7183. {
  7184. uint8_t dscp;
  7185. struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
  7186. struct dp_soc *soc = pdev->soc;
  7187. if (!soc)
  7188. return;
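/* Extract the DSCP code point from the TOS value, update the SW map and,
 * if this map is backed by hardware, program the new DSCP->TID entry via
 * HAL.
 */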
  7189. dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  7190. pdev->dscp_tid_map[map_id][dscp] = tid;
  7191. if (map_id < soc->num_hw_dscp_tid_map)
  7192. hal_tx_update_dscp_tid(soc->hal_soc, tid,
  7193. map_id, dscp);
  7194. return;
  7195. }
  7196. /**
  7197. * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
  7198. * @pdev_handle: pdev handle
  7199. * @val: hmmc-dscp flag value
  7200. *
  7201. * Return: void
  7202. */
  7203. static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
  7204. bool val)
  7205. {
  7206. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7207. pdev->hmmc_tid_override_en = val;
  7208. }
  7209. /**
  7210. * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
  7211. * @pdev_handle: pdev handle
  7212. * @tid: tid value
  7213. *
  7214. * Return: void
  7215. */
  7216. static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
  7217. uint8_t tid)
  7218. {
  7219. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7220. pdev->hmmc_tid = tid;
  7221. }
  7222. /**
  7223. * dp_fw_stats_process(): Process TxRX FW stats request
  7224. * @vdev_handle: DP VDEV handle
  7225. * @req: stats request
  7226. *
  7227. * return: int
  7228. */
  7229. static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
  7230. struct cdp_txrx_stats_req *req)
  7231. {
  7232. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7233. struct dp_pdev *pdev = NULL;
  7234. uint32_t stats = req->stats;
  7235. uint8_t mac_id = req->mac_id;
  7236. if (!vdev) {
  7237. DP_TRACE(NONE, "VDEV not found");
  7238. return 1;
  7239. }
  7240. pdev = vdev->pdev;
  7241. /*
  7242. * For HTT_DBG_EXT_STATS_RESET command, FW need to config
  7243. * from param0 to param3 according to below rule:
  7244. *
  7245. * PARAM:
  7246. * - config_param0 : start_offset (stats type)
  7247. * - config_param1 : stats bmask from start offset
  7248. * - config_param2 : stats bmask from start offset + 32
  7249. * - config_param3 : stats bmask from start offset + 64
  7250. */
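/* CDP_TXRX_STATS_0 corresponds to the reset request described above:
 * start at the PDEV TX stats type and set all bits of the three bitmask
 * words so every stats type from that offset is covered.
 */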
  7251. if (req->stats == CDP_TXRX_STATS_0) {
  7252. req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
  7253. req->param1 = 0xFFFFFFFF;
  7254. req->param2 = 0xFFFFFFFF;
  7255. req->param3 = 0xFFFFFFFF;
  7256. } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
  7257. req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
  7258. }
  7259. return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
  7260. req->param1, req->param2, req->param3,
  7261. 0, 0, mac_id);
  7262. }
  7263. /**
  7264. * dp_txrx_stats_request - function to map to firmware and host stats
  7265. * @vdev: virtual handle
  7266. * @req: stats request
  7267. *
  7268. * Return: QDF_STATUS
  7269. */
  7270. static
  7271. QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
  7272. struct cdp_txrx_stats_req *req)
  7273. {
  7274. int host_stats;
  7275. int fw_stats;
  7276. enum cdp_stats stats;
  7277. int num_stats;
  7278. if (!vdev || !req) {
  7279. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7280. "Invalid vdev/req instance");
  7281. return QDF_STATUS_E_INVAL;
  7282. }
  7283. stats = req->stats;
  7284. if (stats >= CDP_TXRX_MAX_STATS)
  7285. return QDF_STATUS_E_INVAL;
/*
 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
 * it has to be updated whenever new FW HTT stats are added.
 */
  7290. if (stats > CDP_TXRX_STATS_HTT_MAX)
  7291. stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
  7292. num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
  7293. if (stats >= num_stats) {
  7294. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7295. "%s: Invalid stats option: %d", __func__, stats);
  7296. return QDF_STATUS_E_INVAL;
  7297. }
  7298. req->stats = stats;
  7299. fw_stats = dp_stats_mapping_table[stats][STATS_FW];
  7300. host_stats = dp_stats_mapping_table[stats][STATS_HOST];
  7301. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7302. "stats: %u fw_stats_type: %d host_stats: %d",
  7303. stats, fw_stats, host_stats);
  7304. if (fw_stats != TXRX_FW_STATS_INVALID) {
  7305. /* update request with FW stats type */
  7306. req->stats = fw_stats;
  7307. return dp_fw_stats_process(vdev, req);
  7308. }
  7309. if ((host_stats != TXRX_HOST_STATS_INVALID) &&
  7310. (host_stats <= TXRX_HOST_STATS_MAX))
  7311. return dp_print_host_stats(vdev, req);
  7312. else
  7313. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7314. "Wrong Input for TxRx Stats");
  7315. return QDF_STATUS_SUCCESS;
  7316. }
  7317. /*
  7318. * dp_print_napi_stats(): NAPI stats
  7319. * @soc - soc handle
  7320. */
  7321. static void dp_print_napi_stats(struct dp_soc *soc)
  7322. {
  7323. hif_print_napi_stats(soc->hif_handle);
  7324. }
  7325. /*
  7326. * dp_print_per_ring_stats(): Packet count per ring
  7327. * @soc - soc handle
  7328. */
  7329. static void dp_print_per_ring_stats(struct dp_soc *soc)
  7330. {
  7331. uint8_t ring;
  7332. uint16_t core;
  7333. uint64_t total_packets;
  7334. DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
  7335. for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
  7336. total_packets = 0;
  7337. DP_TRACE_STATS(INFO_HIGH,
  7338. "Packets on ring %u:", ring);
  7339. for (core = 0; core < NR_CPUS; core++) {
  7340. DP_TRACE_STATS(INFO_HIGH,
  7341. "Packets arriving on core %u: %llu",
  7342. core,
  7343. soc->stats.rx.ring_packets[core][ring]);
  7344. total_packets += soc->stats.rx.ring_packets[core][ring];
  7345. }
  7346. DP_TRACE_STATS(INFO_HIGH,
  7347. "Total packets on ring %u: %llu",
  7348. ring, total_packets);
  7349. }
  7350. }
/*
 * dp_txrx_path_stats() - Function to dump Tx/Rx path statistics
 * @soc: soc handle
 *
 * Return: none
 */
  7357. static void dp_txrx_path_stats(struct dp_soc *soc)
  7358. {
  7359. uint8_t error_code;
  7360. uint8_t loop_pdev;
  7361. struct dp_pdev *pdev;
  7362. uint8_t i;
  7363. if (!soc) {
  7364. DP_TRACE(ERROR, "%s: Invalid access",
  7365. __func__);
  7366. return;
  7367. }
  7368. for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
  7369. pdev = soc->pdev_list[loop_pdev];
  7370. dp_aggregate_pdev_stats(pdev);
  7371. DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
  7372. DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
  7373. pdev->stats.tx_i.rcvd.num,
  7374. pdev->stats.tx_i.rcvd.bytes);
  7375. DP_TRACE_STATS(INFO_HIGH,
  7376. "processed from host: %u msdus (%llu bytes)",
  7377. pdev->stats.tx_i.processed.num,
  7378. pdev->stats.tx_i.processed.bytes);
  7379. DP_TRACE_STATS(INFO_HIGH,
  7380. "successfully transmitted: %u msdus (%llu bytes)",
  7381. pdev->stats.tx.tx_success.num,
  7382. pdev->stats.tx.tx_success.bytes);
  7383. DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
  7384. DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
  7385. pdev->stats.tx_i.dropped.dropped_pkt.num);
  7386. DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
  7387. pdev->stats.tx_i.dropped.desc_na.num);
  7388. DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
  7389. pdev->stats.tx_i.dropped.ring_full);
  7390. DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
  7391. pdev->stats.tx_i.dropped.enqueue_fail);
  7392. DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
  7393. pdev->stats.tx_i.dropped.dma_error);
  7394. DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
  7395. DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
  7396. pdev->stats.tx.tx_failed);
  7397. DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
  7398. pdev->stats.tx.dropped.age_out);
  7399. DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
  7400. pdev->stats.tx.dropped.fw_rem.num);
  7401. DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
  7402. pdev->stats.tx.dropped.fw_rem.bytes);
  7403. DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
  7404. pdev->stats.tx.dropped.fw_rem_tx);
  7405. DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
  7406. pdev->stats.tx.dropped.fw_rem_notx);
  7407. DP_TRACE_STATS(INFO_HIGH, "Invalid peer on tx path: %u",
  7408. pdev->soc->stats.tx.tx_invalid_peer.num);
  7409. DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
  7410. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7411. pdev->stats.tx_comp_histogram.pkts_1);
  7412. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7413. pdev->stats.tx_comp_histogram.pkts_2_20);
  7414. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7415. pdev->stats.tx_comp_histogram.pkts_21_40);
  7416. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7417. pdev->stats.tx_comp_histogram.pkts_41_60);
  7418. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7419. pdev->stats.tx_comp_histogram.pkts_61_80);
  7420. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7421. pdev->stats.tx_comp_histogram.pkts_81_100);
  7422. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7423. pdev->stats.tx_comp_histogram.pkts_101_200);
  7424. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7425. pdev->stats.tx_comp_histogram.pkts_201_plus);
  7426. DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
  7427. DP_TRACE_STATS(INFO_HIGH,
  7428. "delivered %u msdus ( %llu bytes),",
  7429. pdev->stats.rx.to_stack.num,
  7430. pdev->stats.rx.to_stack.bytes);
  7431. for (i = 0; i < CDP_MAX_RX_RINGS; i++)
  7432. DP_TRACE_STATS(INFO_HIGH,
  7433. "received on reo[%d] %u msdus( %llu bytes),",
  7434. i, pdev->stats.rx.rcvd_reo[i].num,
  7435. pdev->stats.rx.rcvd_reo[i].bytes);
  7436. DP_TRACE_STATS(INFO_HIGH,
  7437. "intra-bss packets %u msdus ( %llu bytes),",
  7438. pdev->stats.rx.intra_bss.pkts.num,
  7439. pdev->stats.rx.intra_bss.pkts.bytes);
  7440. DP_TRACE_STATS(INFO_HIGH,
  7441. "intra-bss fails %u msdus ( %llu bytes),",
  7442. pdev->stats.rx.intra_bss.fail.num,
  7443. pdev->stats.rx.intra_bss.fail.bytes);
  7444. DP_TRACE_STATS(INFO_HIGH,
  7445. "raw packets %u msdus ( %llu bytes),",
  7446. pdev->stats.rx.raw.num,
  7447. pdev->stats.rx.raw.bytes);
  7448. DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
  7449. pdev->stats.rx.err.mic_err);
  7450. DP_TRACE_STATS(INFO_HIGH, "Invalid peer on rx path: %u",
  7451. pdev->soc->stats.rx.err.rx_invalid_peer.num);
  7452. DP_TRACE_STATS(INFO_HIGH, "sw_peer_id invalid %u",
  7453. pdev->soc->stats.rx.err.rx_invalid_peer_id.num);
  7454. DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
  7455. DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
  7456. pdev->soc->stats.rx.err.invalid_rbm);
  7457. DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
  7458. pdev->soc->stats.rx.err.hal_ring_access_fail);
  7459. for (error_code = 0; error_code < HAL_REO_ERR_MAX;
  7460. error_code++) {
  7461. if (!pdev->soc->stats.rx.err.reo_error[error_code])
  7462. continue;
  7463. DP_TRACE_STATS(INFO_HIGH,
  7464. "Reo error number (%u): %u msdus",
  7465. error_code,
  7466. pdev->soc->stats.rx.err
  7467. .reo_error[error_code]);
  7468. }
  7469. for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
  7470. error_code++) {
  7471. if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
  7472. continue;
  7473. DP_TRACE_STATS(INFO_HIGH,
  7474. "Rxdma error number (%u): %u msdus",
  7475. error_code,
  7476. pdev->soc->stats.rx.err
  7477. .rxdma_error[error_code]);
  7478. }
  7479. DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
  7480. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7481. pdev->stats.rx_ind_histogram.pkts_1);
  7482. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7483. pdev->stats.rx_ind_histogram.pkts_2_20);
  7484. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7485. pdev->stats.rx_ind_histogram.pkts_21_40);
  7486. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7487. pdev->stats.rx_ind_histogram.pkts_41_60);
  7488. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7489. pdev->stats.rx_ind_histogram.pkts_61_80);
  7490. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7491. pdev->stats.rx_ind_histogram.pkts_81_100);
  7492. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7493. pdev->stats.rx_ind_histogram.pkts_101_200);
  7494. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7495. pdev->stats.rx_ind_histogram.pkts_201_plus);
  7496. DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
  7497. __func__,
  7498. pdev->soc->wlan_cfg_ctx
  7499. ->tso_enabled,
  7500. pdev->soc->wlan_cfg_ctx
  7501. ->lro_enabled,
  7502. pdev->soc->wlan_cfg_ctx
  7503. ->rx_hash,
  7504. pdev->soc->wlan_cfg_ctx
  7505. ->napi_enabled);
  7506. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7507. DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
  7508. __func__,
  7509. pdev->soc->wlan_cfg_ctx
  7510. ->tx_flow_stop_queue_threshold,
  7511. pdev->soc->wlan_cfg_ctx
  7512. ->tx_flow_start_queue_offset);
  7513. #endif
  7514. }
  7515. }
/*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: DP soc handle
 * @value: statistics option
 * @level: verbosity level for the dump
 */
  7520. static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
  7521. enum qdf_stats_verbosity_level level)
  7522. {
  7523. struct dp_soc *soc =
  7524. (struct dp_soc *)psoc;
  7525. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7526. if (!soc) {
  7527. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7528. "%s: soc is NULL", __func__);
  7529. return QDF_STATUS_E_INVAL;
  7530. }
  7531. switch (value) {
  7532. case CDP_TXRX_PATH_STATS:
  7533. dp_txrx_path_stats(soc);
  7534. break;
  7535. case CDP_RX_RING_STATS:
  7536. dp_print_per_ring_stats(soc);
  7537. break;
  7538. case CDP_TXRX_TSO_STATS:
  7539. /* TODO: NOT IMPLEMENTED */
  7540. break;
  7541. case CDP_DUMP_TX_FLOW_POOL_INFO:
  7542. cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
  7543. break;
  7544. case CDP_DP_NAPI_STATS:
  7545. dp_print_napi_stats(soc);
  7546. break;
  7547. case CDP_TXRX_DESC_STATS:
  7548. /* TODO: NOT IMPLEMENTED */
  7549. break;
  7550. default:
  7551. status = QDF_STATUS_E_INVAL;
  7552. break;
  7553. }
  7554. return status;
  7555. }
  7556. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - API to store datapath
 * flow control config parameters
 * @soc: soc handle
 * @params: pointer to the datapath config parameters
 *
 * Return: void
 */
  7565. static inline
  7566. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7567. struct cdp_config_params *params)
  7568. {
  7569. soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
  7570. params->tx_flow_stop_queue_threshold;
  7571. soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
  7572. params->tx_flow_start_queue_offset;
  7573. }
  7574. #else
  7575. static inline
  7576. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7577. struct cdp_config_params *params)
  7578. {
  7579. }
  7580. #endif
/**
 * dp_update_config_parameters() - API to store datapath
 * config parameters
 * @psoc: soc handle
 * @params: pointer to the config parameters to store
 *
 * Return: status
 */
  7589. static
  7590. QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
  7591. struct cdp_config_params *params)
  7592. {
  7593. struct dp_soc *soc = (struct dp_soc *)psoc;
  7594. if (!(soc)) {
  7595. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7596. "%s: Invalid handle", __func__);
  7597. return QDF_STATUS_E_INVAL;
  7598. }
  7599. soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
  7600. soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
  7601. soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
  7602. soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
  7603. params->tcp_udp_checksumoffload;
  7604. soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
  7605. soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
  7606. soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
  7607. dp_update_flow_control_parameters(soc, params);
  7608. return QDF_STATUS_SUCCESS;
  7609. }
/**
 * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy
 * @vdev_handle: datapath vdev handle
 * @val: WDS rx policy flags (4-address unicast/multicast filtering)
 *
 * Return: void
 */
  7618. #ifdef WDS_VENDOR_EXTENSION
  7619. void
  7620. dp_txrx_set_wds_rx_policy(
  7621. struct cdp_vdev *vdev_handle,
  7622. u_int32_t val)
  7623. {
  7624. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7625. struct dp_peer *peer;
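/* For an AP vdev the policy is applied on the BSS (self) peer; for a STA
 * vdev it is applied on the first peer in the list (typically the AP the
 * station is associated to).
 */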
  7626. if (vdev->opmode == wlan_op_mode_ap) {
  7627. /* for ap, set it on bss_peer */
  7628. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  7629. if (peer->bss_peer) {
  7630. peer->wds_ecm.wds_rx_filter = 1;
  7631. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7632. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7633. break;
  7634. }
  7635. }
  7636. } else if (vdev->opmode == wlan_op_mode_sta) {
  7637. peer = TAILQ_FIRST(&vdev->peer_list);
  7638. peer->wds_ecm.wds_rx_filter = 1;
  7639. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7640. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7641. }
  7642. }
  7643. /**
  7644. * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
  7645. *
  7646. * @peer_handle - datapath peer handle
  7647. * @wds_tx_ucast: policy for unicast transmission
  7648. * @wds_tx_mcast: policy for multicast transmission
  7649. *
  7650. * Return: void
  7651. */
  7652. void
  7653. dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
  7654. int wds_tx_ucast, int wds_tx_mcast)
  7655. {
  7656. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7657. if (wds_tx_ucast || wds_tx_mcast) {
  7658. peer->wds_enabled = 1;
  7659. peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
  7660. peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
  7661. } else {
  7662. peer->wds_enabled = 0;
  7663. peer->wds_ecm.wds_tx_ucast_4addr = 0;
  7664. peer->wds_ecm.wds_tx_mcast_4addr = 0;
  7665. }
  7666. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7667. FL("Policy Update set to :\
  7668. peer->wds_enabled %d\
  7669. peer->wds_ecm.wds_tx_ucast_4addr %d\
  7670. peer->wds_ecm.wds_tx_mcast_4addr %d"),
  7671. peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
  7672. peer->wds_ecm.wds_tx_mcast_4addr);
  7673. return;
  7674. }
  7675. #endif
  7676. static struct cdp_wds_ops dp_ops_wds = {
  7677. .vdev_set_wds = dp_vdev_set_wds,
  7678. #ifdef WDS_VENDOR_EXTENSION
  7679. .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
  7680. .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
  7681. #endif
  7682. };
/*
 * dp_txrx_data_tx_cb_set(): set the callback for non-standard tx
 * @vdev_handle: datapath vdev handle
 * @callback: callback function
 * @ctxt: callback context
 *
 */
  7690. static void
  7691. dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
  7692. ol_txrx_data_tx_cb callback, void *ctxt)
  7693. {
  7694. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7695. vdev->tx_non_std_data_callback.func = callback;
  7696. vdev->tx_non_std_data_callback.ctxt = ctxt;
  7697. }
  7698. /**
  7699. * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
  7700. * @pdev_hdl: datapath pdev handle
  7701. *
  7702. * Return: opaque pointer to dp txrx handle
  7703. */
  7704. static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
  7705. {
  7706. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7707. return pdev->dp_txrx_handle;
  7708. }
  7709. /**
  7710. * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
  7711. * @pdev_hdl: datapath pdev handle
  7712. * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
  7713. *
  7714. * Return: void
  7715. */
  7716. static void
  7717. dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
  7718. {
  7719. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7720. pdev->dp_txrx_handle = dp_txrx_hdl;
  7721. }
  7722. /**
  7723. * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
  7724. * @soc_handle: datapath soc handle
  7725. *
  7726. * Return: opaque pointer to external dp (non-core DP)
  7727. */
  7728. static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
  7729. {
  7730. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7731. return soc->external_txrx_handle;
  7732. }
  7733. /**
  7734. * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
  7735. * @soc_handle: datapath soc handle
  7736. * @txrx_handle: opaque pointer to external dp (non-core DP)
  7737. *
  7738. * Return: void
  7739. */
  7740. static void
  7741. dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
  7742. {
  7743. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7744. soc->external_txrx_handle = txrx_handle;
  7745. }
  7746. /**
  7747. * dp_get_cfg_capabilities() - get dp capabilities
  7748. * @soc_handle: datapath soc handle
  7749. * @dp_caps: enum for dp capabilities
  7750. *
  7751. * Return: bool to determine if dp caps is enabled
  7752. */
  7753. static bool
  7754. dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
  7755. enum cdp_capabilities dp_caps)
  7756. {
  7757. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7758. return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
  7759. }
  7760. #ifdef FEATURE_AST
  7761. static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  7762. {
  7763. struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
  7764. struct dp_peer *peer = (struct dp_peer *) peer_hdl;
  7765. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
/*
 * For a BSS peer, a new peer is not created on alloc_node if a peer
 * with the same address already exists; instead, the refcnt of the
 * existing peer is increased. Correspondingly, in the delete path only
 * the refcnt is decreased, and the peer is deleted only when all
 * references are gone. So delete_in_progress should not be set for a
 * bss_peer unless only 2 references remain (the peer map reference
 * and the peer hash table reference).
 */
  7775. if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
  7776. return;
  7777. }
  7778. qdf_spin_lock_bh(&soc->ast_lock);
  7779. peer->delete_in_progress = true;
  7780. dp_peer_delete_ast_entries(soc, peer);
  7781. qdf_spin_unlock_bh(&soc->ast_lock);
  7782. }
  7783. #endif
  7784. #ifdef ATH_SUPPORT_NAC_RSSI
/**
 * dp_vdev_get_neighbour_rssi(): Retrieve the stored RSSI for a configured NAC
 * @vdev_hdl: DP vdev handle
 * @mac_addr: mac address of the neighbour peer
 * @rssi: pointer to return the rssi value
 *
 * Return: QDF_STATUS_SUCCESS if the peer was found, error status otherwise.
 */
  7792. static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
  7793. char *mac_addr,
  7794. uint8_t *rssi)
  7795. {
  7796. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  7797. struct dp_pdev *pdev = vdev->pdev;
  7798. struct dp_neighbour_peer *peer = NULL;
  7799. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  7800. *rssi = 0;
  7801. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  7802. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  7803. neighbour_peer_list_elem) {
  7804. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  7805. mac_addr, DP_MAC_ADDR_LEN) == 0) {
  7806. *rssi = peer->rssi;
  7807. status = QDF_STATUS_SUCCESS;
  7808. break;
  7809. }
  7810. }
  7811. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  7812. return status;
  7813. }
  7814. static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
  7815. enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
  7816. uint8_t chan_num)
  7817. {
  7818. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7819. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  7820. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7821. pdev->nac_rssi_filtering = 1;
  7822. /* Store address of NAC (neighbour peer) which will be checked
  7823. * against TA of received packets.
  7824. */
  7825. if (cmd == CDP_NAC_PARAM_ADD) {
  7826. dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
  7827. client_macaddr);
  7828. } else if (cmd == CDP_NAC_PARAM_DEL) {
  7829. dp_update_filter_neighbour_peers(vdev_handle,
  7830. DP_NAC_PARAM_DEL,
  7831. client_macaddr);
  7832. }
  7833. if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
  7834. soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
  7835. ((void *)vdev->pdev->ctrl_pdev,
  7836. vdev->vdev_id, cmd, bssid);
  7837. return QDF_STATUS_SUCCESS;
  7838. }
  7839. #endif
/**
 * dp_enable_peer_based_pktlog() - Set flag for peer based filtering
 * for pktlog
 * @txrx_pdev_handle: cdp_pdev handle
 * @mac_addr: mac address of the peer
 * @enb_dsb: Enable or disable peer based filtering
 *
 * Return: QDF_STATUS
 */
  7848. static int
  7849. dp_enable_peer_based_pktlog(
  7850. struct cdp_pdev *txrx_pdev_handle,
  7851. char *mac_addr, uint8_t enb_dsb)
  7852. {
  7853. struct dp_peer *peer;
  7854. uint8_t local_id;
  7855. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
  7856. peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
  7857. mac_addr, &local_id);
  7858. if (!peer) {
  7859. dp_err("Invalid Peer");
  7860. return QDF_STATUS_E_FAILURE;
  7861. }
  7862. peer->peer_based_pktlog_filter = enb_dsb;
  7863. pdev->dp_peer_based_pktlog = enb_dsb;
  7864. return QDF_STATUS_SUCCESS;
  7865. }
  7866. static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
  7867. uint32_t max_peers,
  7868. uint32_t max_ast_index,
  7869. bool peer_map_unmap_v2)
  7870. {
  7871. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  7872. soc->max_peers = max_peers;
  7873. qdf_print ("%s max_peers %u, max_ast_index: %u\n",
  7874. __func__, max_peers, max_ast_index);
  7875. wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
  7876. if (dp_peer_find_attach(soc))
  7877. return QDF_STATUS_E_FAILURE;
  7878. soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
  7879. return QDF_STATUS_SUCCESS;
  7880. }
  7881. /**
  7882. * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
  7883. * @dp_pdev: dp pdev handle
  7884. * @ctrl_pdev: UMAC ctrl pdev handle
  7885. *
  7886. * Return: void
  7887. */
  7888. static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
  7889. struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
  7890. {
  7891. struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
  7892. pdev->ctrl_pdev = ctrl_pdev;
  7893. }
  7894. /*
  7895. * dp_get_cfg() - get dp cfg
  7896. * @soc: cdp soc handle
  7897. * @cfg: cfg enum
  7898. *
  7899. * Return: cfg value
  7900. */
  7901. static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
  7902. {
  7903. struct dp_soc *dpsoc = (struct dp_soc *)soc;
  7904. uint32_t value = 0;
  7905. switch (cfg) {
  7906. case cfg_dp_enable_data_stall:
  7907. value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
  7908. break;
  7909. case cfg_dp_enable_ip_tcp_udp_checksum_offload:
  7910. value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
  7911. break;
  7912. case cfg_dp_tso_enable:
  7913. value = dpsoc->wlan_cfg_ctx->tso_enabled;
  7914. break;
  7915. case cfg_dp_lro_enable:
  7916. value = dpsoc->wlan_cfg_ctx->lro_enabled;
  7917. break;
  7918. case cfg_dp_gro_enable:
  7919. value = dpsoc->wlan_cfg_ctx->gro_enabled;
  7920. break;
  7921. case cfg_dp_tx_flow_start_queue_offset:
  7922. value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
  7923. break;
  7924. case cfg_dp_tx_flow_stop_queue_threshold:
  7925. value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
  7926. break;
  7927. case cfg_dp_disable_intra_bss_fwd:
  7928. value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
  7929. break;
  7930. default:
  7931. value = 0;
  7932. }
  7933. return value;
  7934. }
  7935. static struct cdp_cmn_ops dp_ops_cmn = {
  7936. .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
  7937. .txrx_vdev_attach = dp_vdev_attach_wifi3,
  7938. .txrx_vdev_detach = dp_vdev_detach_wifi3,
  7939. .txrx_pdev_attach = dp_pdev_attach_wifi3,
  7940. .txrx_pdev_detach = dp_pdev_detach_wifi3,
  7941. .txrx_pdev_deinit = dp_pdev_deinit_wifi3,
  7942. .txrx_peer_create = dp_peer_create_wifi3,
  7943. .txrx_peer_setup = dp_peer_setup_wifi3,
  7944. #ifdef FEATURE_AST
  7945. .txrx_peer_teardown = dp_peer_teardown_wifi3,
  7946. #else
  7947. .txrx_peer_teardown = NULL,
  7948. #endif
  7949. .txrx_peer_add_ast = dp_peer_add_ast_wifi3,
  7950. .txrx_peer_update_ast = dp_peer_update_ast_wifi3,
  7951. .txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
  7952. .txrx_peer_get_ast_info_by_pdev =
  7953. dp_peer_get_ast_info_by_pdevid_wifi3,
  7954. .txrx_peer_ast_delete_by_soc =
  7955. dp_peer_ast_entry_del_by_soc,
  7956. .txrx_peer_ast_delete_by_pdev =
  7957. dp_peer_ast_entry_del_by_pdev,
  7958. .txrx_peer_delete = dp_peer_delete_wifi3,
  7959. .txrx_vdev_register = dp_vdev_register_wifi3,
  7960. .txrx_vdev_flush_peers = dp_vdev_flush_peers,
  7961. .txrx_soc_detach = dp_soc_detach_wifi3,
  7962. .txrx_soc_deinit = dp_soc_deinit_wifi3,
  7963. .txrx_soc_init = dp_soc_init_wifi3,
  7964. .txrx_tso_soc_attach = dp_tso_soc_attach,
  7965. .txrx_tso_soc_detach = dp_tso_soc_detach,
  7966. .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
  7967. .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
  7968. .txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
  7969. .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
  7970. .txrx_ath_getstats = dp_get_device_stats,
  7971. .addba_requestprocess = dp_addba_requestprocess_wifi3,
  7972. .addba_responsesetup = dp_addba_responsesetup_wifi3,
  7973. .addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
  7974. .delba_process = dp_delba_process_wifi3,
  7975. .set_addba_response = dp_set_addba_response,
  7976. .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
  7977. .flush_cache_rx_queue = NULL,
  7978. /* TODO: get API's for dscp-tid need to be added*/
  7979. .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
  7980. .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
  7981. .hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
  7982. .set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
  7983. .txrx_get_total_per = dp_get_total_per,
  7984. .txrx_stats_request = dp_txrx_stats_request,
  7985. .txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
  7986. .txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
  7987. .txrx_get_vow_config_frm_pdev = NULL,
  7988. .txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
  7989. .txrx_set_nac = dp_set_nac,
  7990. .txrx_get_tx_pending = dp_get_tx_pending,
  7991. .txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
  7992. .txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
  7993. .display_stats = dp_txrx_dump_stats,
  7994. .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
  7995. .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
  7996. .txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
  7997. .txrx_intr_detach = dp_soc_interrupt_detach,
  7998. .set_pn_check = dp_set_pn_check_wifi3,
  7999. .update_config_parameters = dp_update_config_parameters,
  8000. /* TODO: Add other functions */
  8001. .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
  8002. .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
  8003. .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
  8004. .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
  8005. .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
  8006. .txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
  8007. .txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
  8008. .tx_send = dp_tx_send,
  8009. .txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
  8010. .txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
  8011. .txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
  8012. .txrx_peer_map_attach = dp_peer_map_attach_wifi3,
  8013. .txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
  8014. .txrx_get_os_rx_handles_from_vdev =
  8015. dp_get_os_rx_handles_from_vdev_wifi3,
  8016. .delba_tx_completion = dp_delba_tx_completion_wifi3,
  8017. .get_dp_capabilities = dp_get_cfg_capabilities,
  8018. .txrx_get_cfg = dp_get_cfg,
  8019. };
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	.txrx_get_sec_type = dp_get_sec_type,
	/* TODO: Add other functions */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
};
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
	.tx_me_find_ast_entry = NULL,
};
static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
};
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
#ifdef CONFIG_WIN
static struct cdp_pflow_ops dp_ops_pflow = {
	/* TODO */
};
#endif /* CONFIG_WIN */
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
 * @opaque_pdev: DP pdev context
 *
 * DP is ready to runtime suspend if there are no pending TX packets.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	/* Abort if there are any pending TX packets */
	if (dp_get_tx_pending(opaque_pdev) > 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Abort suspend due to pending TX packets"));
		return QDF_STATUS_E_AGAIN;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_runtime_resume() - ensure DP is ready to runtime resume
 * @opaque_pdev: DP pdev context
 *
 * Resume DP for runtime PM.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;
	void *hal_srng;
	int i;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		hal_srng = soc->tcl_data_ring[i].hal_srng;
		if (hal_srng) {
			/* We actually only need to acquire the lock */
			hal_srng_access_start(soc->hal_soc, hal_srng);
			/* Update SRC ring head pointer for HW to send
			 * all pending packets
			 */
			hal_srng_access_end(soc->hal_soc, hal_srng);
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_RUNTIME_PM */
#ifndef CONFIG_WIN
static struct cdp_misc_ops dp_ops_misc = {
	.tx_non_std = dp_tx_non_std,
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
};
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
#ifdef IPA_OFFLOAD
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level
};
#endif
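/**
 * dp_bus_suspend() - prepare DP for a bus suspend
 * @opaque_pdev: DP pdev context
 *
 * Waits up to SUSPEND_DRAIN_WAIT ms (polling every 50 ms) for pending TX
 * packets to drain, and once drained stops the interrupt timer when
 * DP_INTR_POLL mode is in use.
 *
 * Return: QDF_STATUS_SUCCESS if TX drained in time,
 *	   QDF_STATUS_E_TIMEOUT otherwise
 */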
static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */

	/* Abort if there are any pending TX packets */
	while (dp_get_tx_pending(opaque_pdev) > 0) {
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			dp_err("TX frames are pending, abort suspend");
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}
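/**
 * dp_bus_resume() - resume DP after a bus resume
 * @opaque_pdev: DP pdev context
 *
 * Restarts the interrupt timer when DP_INTR_POLL mode is in use.
 *
 * Return: QDF_STATUS_SUCCESS
 */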
static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	return QDF_STATUS_SUCCESS;
}

static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume
};
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
/*
 * dp_peer_get_ref_find_by_addr() - find a peer by MAC address and take a
 *				    reference on it
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @local_id: local id for the peer
 * @debug_id: to track enum peer access
 *
 * Return: peer instance pointer
 */
static inline void *
dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
			     uint8_t *local_id,
			     enum peer_debug_id_type debug_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	return peer;
}
/*
 * dp_peer_release_ref - release peer ref count
 * @peer: peer handle
 * @debug_id: to track enum peer access
 *
 * Return: None
 */
static inline
void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
{
	dp_peer_unref_delete(peer);
}
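/*
 * Illustrative usage of the get/release pair above (sketch only, not part of
 * the driver; the caller, pdev handle, and debug id value shown here are
 * hypothetical and the reference must be released after the access):
 *
 *	uint8_t local_id;
 *	void *peer;
 *
 *	peer = dp_peer_get_ref_find_by_addr(pdev_handle, mac_addr, &local_id,
 *					    some_debug_id);
 *	if (peer) {
 *		// use peer / local_id here
 *		dp_peer_release_ref(peer, some_debug_id);
 *	}
 */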
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
	.peer_release_ref = dp_peer_release_ref,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
};
#endif
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
/*
 * dp_soc_set_txrx_ring_map() - set the default TCL-to-CPU ring map
 * @soc: DP handle for soc
 *
 * Populates the soc TX ring map for each interrupt context from the default
 * CPU ring map (DP_NSS_DEFAULT_MAP).
 *
 * Return: Void
 */
static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
{
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
	}
}
#ifdef QCA_WIFI_QCA8074
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);
	if (!dp_soc)
		return NULL;

	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
		return NULL;

	return (void *)dp_soc;
}
#else
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);

	return (void *)dp_soc;
}
#endif
/**
 * dp_soc_attach() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id)
{
	int int_ctx;
	struct dp_soc *soc = NULL;
	struct htt_soc *htt_soc = NULL;

	soc = qdf_mem_malloc(sizeof(*soc));
	if (!soc) {
		dp_err("DP SOC memory allocation failed");
		goto fail0;
	}

	int_ctx = 0;
	soc->device_id = device_id;
	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed\n");
		goto fail1;
	}

	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
	if (!htt_soc) {
		dp_err("HTT attach failed");
		goto fail1;
	}
	soc->htt_handle = htt_soc;
	htt_soc->dp_soc = soc;
	htt_soc->htc_soc = htc_handle;

	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	return (void *)soc;
fail2:
	qdf_mem_free(htt_soc);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
/**
 * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)dpsoc;
	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;

	htt_soc->htc_soc = htc_handle;
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		return NULL;

	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
			   soc->hal_soc, soc->osdev);
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		break;
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
#endif
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL) {
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
		}

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);
	qdf_spinlock_create(&soc->ast_lock);
	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map */
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return soc;
}
/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (Unused)
 * @ol_ops: Offload Operations (Unused)
 * @device_id: Device ID (Unused)
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	return dp_soc_init(dpsoc, htc_handle, hif_handle);
}
#endif
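/*
 * Illustrative SOC lifecycle as wired through the functions above (sketch
 * only; the actual callers live in the attach layer, not in this file):
 *
 *	soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				  qdf_osdev, ol_ops, device_id);
 *	...
 *	// Teardown goes through the cdp_cmn_ops callbacks registered in
 *	// dp_ops_cmn: txrx_soc_deinit (dp_soc_deinit_wifi3) followed by
 *	// txrx_soc_detach (dp_soc_detach_wifi3).
 */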
/*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: Return pdev corresponding to MAC
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return soc->pdev_list[mac_id];

	/* Typically for MCL, as there is only 1 PDEV */
	return soc->pdev_list[0];
}
/*
 * dp_is_hw_dbs_enable() - Check if DBS is supported and clamp the MAC ring
 *			   count accordingly
 * @soc: DP SoC context
 * @max_mac_rings: Pointer to the number of MAC rings; left unchanged when
 *		   2x2 DBS is supported, otherwise set to 1
 *
 * Return: None
 */
static
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings)
{
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
			is_hw_dbs_2x2_capable(soc->ctrl_psoc);

	*max_mac_rings = (dbs_enable) ? (*max_mac_rings) : 1;
}
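/*
 * Typical call pattern (dp_set_pktlog_wifi3() below does exactly this):
 * initialize max_mac_rings from the pdev config and let
 * dp_is_hw_dbs_enable() clamp it:
 *
 *	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
 *
 *	dp_is_hw_dbs_enable(soc, &max_mac_rings);
 */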
/*
 * dp_set_pktlog_wifi3() - enable/disable pkt log for the given WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not (true or false)
 *
 * Return: 0 on success
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	soc = pdev->soc;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					  mac_for_pdev,
					  pdev->rxdma_mon_status_ring[mac_id]
					  .hal_srng,
					  RXDMA_MONITOR_STATUS,
					  RX_BUFFER_SIZE,
					  &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					  mac_for_pdev,
					  pdev->rxdma_mon_status_ring[mac_id]
					  .hal_srng,
					  RXDMA_MONITOR_STATUS,
					  RX_BUFFER_SIZE_PKTLOG_LITE,
					  &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					  mac_for_pdev,
					  pdev->rxdma_mon_status_ring[mac_id]
					  .hal_srng,
					  RXDMA_MONITOR_STATUS,
					  RX_BUFFER_SIZE,
					  &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the
			 * FW, value 0 is passed. Once these macros are defined
			 * in the htt header file, the proper macros will be
			 * used.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}
			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
#endif
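/*
 * Illustrative call into dp_set_pktlog_wifi3() (sketch only; the real caller
 * is the pktlog/WDI layer, and the pdev pointer shown here is hypothetical):
 *
 *	// Enable lite RX pktlog on a pdev, then disable it again later.
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */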