dp_main.c

/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
    return;
}
#endif

#include "dp_ipa.h"
#include "dp_cal_client_api.h"

#ifdef CONFIG_MCL
extern int con_mode_monitor;
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif

void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
              struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
                                  uint8_t *peer_mac_addr,
                                  struct cdp_ctrl_objmgr_peer *ctrl_peer);
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);

#define DP_INTR_POLL_TIMER_MS 10

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS 1000

/* WDS AST entry aging timer value */
#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS 120000
#define DP_WDS_AST_AGING_TIMER_CNT \
    ((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
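/*
 * Worked example, assuming the count is consumed as a tick divider by the
 * AST aging timer: (120000 / 1000) - 1 = 119, i.e. WDS AST entries are
 * examined for aging roughly once every 119 runs of the 1000 ms generic
 * AST aging timer.
 */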
#define DP_MCS_LENGTH (6*MAX_MCS)
#define DP_NSS_LENGTH (6*SS_COUNT)
#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
#define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
#define DP_MAX_MCS_STRING_LEN 30
#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL 0xb
#define RX_RING_MASK_VAL 0x7
#else
#define TX_RING_MASK_VAL 0xF
#define RX_RING_MASK_VAL 0xF
#endif

#define STR_MAXLEN 64

#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR 0x2000
/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
                                   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
                                      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

#define RNG_ERR "SRNG setup failed for"

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
    0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7,
};
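/*
 * Illustrative lookup, assuming the table is indexed by the 6-bit DSCP value
 * taken from the IP header (0..63):
 *
 *     uint8_t tid = default_dscp_tid_map[46];
 *
 * DSCP 46 (EF) falls in the 40..47 row, so it maps to TID 5 with this default
 * table. In practice the driver programs this map into hardware during pdev
 * setup rather than doing per-packet lookups in software.
 */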
/*
 * struct dp_rate_debug
 *
 * @mcs_type: print string for a given mcs
 * @valid: valid mcs rate?
 */
struct dp_rate_debug {
    char mcs_type[DP_MAX_MCS_STRING_LEN];
    uint8_t valid;
};

#define MCS_VALID 1
#define MCS_INVALID 0

static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
    {
        {"OFDM 48 Mbps", MCS_VALID},
        {"OFDM 24 Mbps", MCS_VALID},
        {"OFDM 12 Mbps", MCS_VALID},
        {"OFDM 6 Mbps ", MCS_VALID},
        {"OFDM 54 Mbps", MCS_VALID},
        {"OFDM 36 Mbps", MCS_VALID},
        {"OFDM 18 Mbps", MCS_VALID},
        {"OFDM 9 Mbps ", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"CCK 11 Mbps Long ", MCS_VALID},
        {"CCK 5.5 Mbps Long ", MCS_VALID},
        {"CCK 2 Mbps Long ", MCS_VALID},
        {"CCK 1 Mbps Long ", MCS_VALID},
        {"CCK 11 Mbps Short ", MCS_VALID},
        {"CCK 5.5 Mbps Short", MCS_VALID},
        {"CCK 2 Mbps Short ", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"HT MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"HT MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"HT MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
        {"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
        {"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
        {"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
        {"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"VHT MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"VHT MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"VHT MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"VHT MCS 3 (16-QAM 1/2) ", MCS_VALID},
        {"VHT MCS 4 (16-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 5 (64-QAM 2/3) ", MCS_VALID},
        {"VHT MCS 6 (64-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 7 (64-QAM 5/6) ", MCS_VALID},
        {"VHT MCS 8 (256-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 9 (256-QAM 5/6) ", MCS_VALID},
        {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
        {"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"HE MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"HE MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"HE MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID},
        {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID},
        {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID},
        {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID},
        {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID},
        {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID},
        {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID},
        {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
        {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_VALID},
    }
};

/**
 * dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
    DP_NSS_DEFAULT_MAP,
    DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
    DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
    DP_NSS_DBDC_OFFLOADED_MAP,
    DP_NSS_DBTC_OFFLOADED_MAP,
    DP_NSS_CPU_RING_MAP_MAX
};

/**
 * @brief Cpu to tx ring map
 */
#ifdef CONFIG_WIN
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
    {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
    {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
    {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
    {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
    {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#else
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
    {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
    {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
    {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
    {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
    {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#endif
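/*
 * Sketch of how the map above is consumed, assuming the second index is an
 * interrupt-context/CPU index below WLAN_CFG_INT_NUM_CONTEXTS:
 *
 *     uint8_t tcl_ring = dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][cpu_ctx];
 *
 * Each row corresponds to one NSS offload configuration and each entry names
 * the TCL TX ring the host should use for that context; the all-0x3 row
 * suggests the host falls back to TCL ring 3 when all three radios are
 * offloaded. The real consumer is the ring-map setup later in this file;
 * this is only an illustration of the table layout.
 */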
/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
    STATS_FW = 0,
    STATS_HOST = 1,
    STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 *
 */
enum dp_fw_stats {
    TXRX_FW_STATS_INVALID = -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
    {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
    /* Last ENUM for HTT FW STATS */
    {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
    {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
};
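/*
 * Sketch of how dp_stats_mapping_table is consulted, assuming a stats request
 * carries an index 'stats' into this table (this mirrors the fw/host stats
 * dispatch implemented later in this file):
 *
 *     int fw_stats   = dp_stats_mapping_table[stats][STATS_FW];
 *     int host_stats = dp_stats_mapping_table[stats][STATS_HOST];
 *
 *     if (fw_stats != TXRX_FW_STATS_INVALID)
 *         ...issue the corresponding HTT extended stats request to FW...
 *     else if (host_stats != TXRX_HOST_STATS_INVALID)
 *         ...dump the matching host-maintained statistics...
 */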
/* MCL specific functions */
#ifdef CONFIG_MCL

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are being processed in timer contexts (polled).
 * This function returns 0, since in interrupt mode (softirq based RX)
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It would
 * be done in a separate timer context.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
    return 0;
}

/*
 * dp_service_mon_rings() - timer to reap monitor rings,
 * required as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_service_mon_rings(void *arg)
{
    struct dp_soc *soc = (struct dp_soc *)arg;
    int ring = 0, work_done, mac_id;
    struct dp_pdev *pdev = NULL;

    for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
        pdev = soc->pdev_list[ring];
        if (!pdev)
            continue;
        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
            int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
                                                      pdev->pdev_id);

            work_done = dp_mon_process(soc, mac_for_pdev,
                                       QCA_NAPI_BUDGET);

            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                      FL("Reaped %d descs from Monitor rings"),
                      work_done);
        }
    }

    qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
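/*
 * Setup sketch for the reap timer re-armed above, assuming the standard QDF
 * timer API; the actual init/start call sites live elsewhere in dp_main.c:
 *
 *     qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
 *                    dp_service_mon_rings, (void *)soc,
 *                    QDF_TIMER_TYPE_WAKE_APPS);
 *     qdf_timer_start(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
 */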
#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @ppdev: physical device handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
    struct dp_pdev *handle = (struct dp_pdev *)ppdev;

    if (handle->pkt_log_init) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: Packet log already initialized", __func__);
        return;
    }

    pktlog_sethandle(&handle->pl_dev, scn);
    pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);

    if (pktlogmod_init(scn)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: pktlogmod_init failed", __func__);
        handle->pkt_log_init = false;
    } else {
        handle->pkt_log_init = true;
    }
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
    struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

    dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
    pktlog_htc_attach();
}

/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
    void *scn = (void *)handle->soc->hif_handle;

    if (!scn) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: Invalid hif(scn) handle", __func__);
        return;
    }

    pktlogmod_exit(scn);
    handle->pkt_log_init = false;
}
#endif
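/*
 * Lifecycle sketch, assuming the usual pairing of these helpers (the attach
 * and detach call sites are outside this excerpt): the control path connects
 * packet log once per pdev and tears it down on pdev detach, roughly:
 *
 *     dp_pkt_log_con_service((struct cdp_pdev *)pdev, hif_ctx);
 *     ...
 *     dp_pktlogmod_exit(pdev);
 */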
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
    return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#endif

/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * Return: pointer to dp_vdev
 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
    return (struct dp_vdev *)cdp_opaque_vdev;
}

static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                 struct cdp_peer *peer_hdl,
                                 uint8_t *mac_addr,
                                 enum cdp_txrx_ast_entry_type type,
                                 uint32_t flags)
{
    return dp_peer_add_ast((struct dp_soc *)soc_hdl,
                           (struct dp_peer *)peer_hdl,
                           mac_addr,
                           type,
                           flags);
}

static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                  void *ast_entry_hdl)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;

    qdf_spin_lock_bh(&soc->ast_lock);
    dp_peer_del_ast((struct dp_soc *)soc_hdl,
                    (struct dp_ast_entry *)ast_entry_hdl);
    qdf_spin_unlock_bh(&soc->ast_lock);
}

static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                    struct cdp_peer *peer_hdl,
                                    uint8_t *wds_macaddr,
                                    uint32_t flags)
{
    int status = -1;
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_ast_entry *ast_entry = NULL;
    struct dp_peer *peer = (struct dp_peer *)peer_hdl;

    qdf_spin_lock_bh(&soc->ast_lock);
    ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
                                                peer->vdev->pdev->pdev_id);

    if (ast_entry) {
        status = dp_peer_update_ast(soc,
                                    peer,
                                    ast_entry, flags);
    }
    qdf_spin_unlock_bh(&soc->ast_lock);

    return status;
}
/*
 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
 * @soc_hdl: Datapath SOC handle
 * @wds_macaddr: WDS entry MAC Address
 * @vdev_handle: vdev handle whose pdev the AST entry is looked up against
 *
 * Return: None
 */
static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                   uint8_t *wds_macaddr, void *vdev_handle)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_ast_entry *ast_entry = NULL;
    struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

    qdf_spin_lock_bh(&soc->ast_lock);
    ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
                                                vdev->pdev->pdev_id);

    if (ast_entry) {
        if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
            (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
            (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
            ast_entry->is_active = TRUE;
        }
    }

    qdf_spin_unlock_bh(&soc->ast_lock);
}
  518. /*
* dp_wds_reset_ast_table_wifi3() - Reset the is_active flag for all AST entries
* @soc_hdl: Datapath SOC handle
* @vdev_hdl: vdev handle (currently unused)
  521. *
  522. * Return: None
  523. */
  524. static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
  525. void *vdev_hdl)
  526. {
  527. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  528. struct dp_pdev *pdev;
  529. struct dp_vdev *vdev;
  530. struct dp_peer *peer;
  531. struct dp_ast_entry *ase, *temp_ase;
  532. int i;
  533. qdf_spin_lock_bh(&soc->ast_lock);
  534. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  535. pdev = soc->pdev_list[i];
  536. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  537. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  538. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  539. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  540. if ((ase->type ==
  541. CDP_TXRX_AST_TYPE_STATIC) ||
  542. (ase->type ==
  543. CDP_TXRX_AST_TYPE_SELF) ||
  544. (ase->type ==
  545. CDP_TXRX_AST_TYPE_STA_BSS))
  546. continue;
  547. ase->is_active = TRUE;
  548. }
  549. }
  550. }
  551. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  552. }
  553. qdf_spin_unlock_bh(&soc->ast_lock);
  554. }
  555. /*
* dp_wds_flush_ast_table_wifi3() - Delete all WDS and HMWDS AST entries
* @soc_hdl: Datapath SOC handle
  558. *
  559. * Return: None
  560. */
  561. static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
  562. {
  563. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  564. struct dp_pdev *pdev;
  565. struct dp_vdev *vdev;
  566. struct dp_peer *peer;
  567. struct dp_ast_entry *ase, *temp_ase;
  568. int i;
  569. qdf_spin_lock_bh(&soc->ast_lock);
  570. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  571. pdev = soc->pdev_list[i];
  572. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  573. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  574. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  575. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  576. if ((ase->type ==
  577. CDP_TXRX_AST_TYPE_STATIC) ||
  578. (ase->type ==
  579. CDP_TXRX_AST_TYPE_SELF) ||
  580. (ase->type ==
  581. CDP_TXRX_AST_TYPE_STA_BSS))
  582. continue;
  583. dp_peer_del_ast(soc, ase);
  584. }
  585. }
  586. }
  587. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  588. }
  589. qdf_spin_unlock_bh(&soc->ast_lock);
  590. }
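/**
* dp_peer_ast_hash_find_soc_wifi3() - CDP wrapper for SOC-wide AST lookup
* @soc_hdl: CDP SOC handle
* @ast_mac_addr: MAC address to search for
*
* Performs the SOC-level AST hash lookup under ast_lock.
*
* Return: opaque pointer to the AST entry, or NULL if none is found
*/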
  591. static void *dp_peer_ast_hash_find_soc_wifi3(struct cdp_soc_t *soc_hdl,
  592. uint8_t *ast_mac_addr)
  593. {
  594. struct dp_ast_entry *ast_entry;
  595. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  596. qdf_spin_lock_bh(&soc->ast_lock);
  597. ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
  598. qdf_spin_unlock_bh(&soc->ast_lock);
  599. return (void *)ast_entry;
  600. }
  601. static void *dp_peer_ast_hash_find_by_pdevid_wifi3(struct cdp_soc_t *soc_hdl,
  602. uint8_t *ast_mac_addr,
  603. uint8_t pdev_id)
  604. {
  605. struct dp_ast_entry *ast_entry;
  606. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  607. qdf_spin_lock_bh(&soc->ast_lock);
  608. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
  609. qdf_spin_unlock_bh(&soc->ast_lock);
  610. return (void *)ast_entry;
  611. }
  612. static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
  613. void *ast_entry_hdl)
  614. {
  615. return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
  616. (struct dp_ast_entry *)ast_entry_hdl);
  617. }
  618. static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
  619. void *ast_entry_hdl)
  620. {
  621. return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
  622. (struct dp_ast_entry *)ast_entry_hdl);
  623. }
  624. static void dp_peer_ast_set_type_wifi3(
  625. struct cdp_soc_t *soc_hdl,
  626. void *ast_entry_hdl,
  627. enum cdp_txrx_ast_entry_type type)
  628. {
  629. dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
  630. (struct dp_ast_entry *)ast_entry_hdl,
  631. type);
  632. }
  633. static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
  634. struct cdp_soc_t *soc_hdl,
  635. void *ast_entry_hdl)
  636. {
  637. return ((struct dp_ast_entry *)ast_entry_hdl)->type;
  638. }
  639. #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
  640. void dp_peer_ast_set_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
  641. void *ast_entry,
  642. void *cp_ctx)
  643. {
  644. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  645. qdf_spin_lock_bh(&soc->ast_lock);
  646. dp_peer_ast_set_cp_ctx(soc,
  647. (struct dp_ast_entry *)ast_entry, cp_ctx);
  648. qdf_spin_unlock_bh(&soc->ast_lock);
  649. }
  650. void *dp_peer_ast_get_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
  651. void *ast_entry)
  652. {
  653. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  654. void *cp_ctx = NULL;
  655. qdf_spin_lock_bh(&soc->ast_lock);
  656. cp_ctx = dp_peer_ast_get_cp_ctx(soc,
  657. (struct dp_ast_entry *)ast_entry);
  658. qdf_spin_unlock_bh(&soc->ast_lock);
  659. return cp_ctx;
  660. }
  661. bool dp_peer_ast_get_wmi_sent_wifi3(struct cdp_soc_t *soc_handle,
  662. void *ast_entry)
  663. {
  664. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  665. bool wmi_sent = false;
  666. qdf_spin_lock_bh(&soc->ast_lock);
  667. wmi_sent = dp_peer_ast_get_del_cmd_sent(soc,
  668. (struct dp_ast_entry *)
  669. ast_entry);
  670. qdf_spin_unlock_bh(&soc->ast_lock);
  671. return wmi_sent;
  672. }
  673. void dp_peer_ast_free_entry_wifi3(struct cdp_soc_t *soc_handle,
  674. void *ast_entry)
  675. {
  676. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  677. qdf_spin_lock_bh(&soc->ast_lock);
  678. dp_peer_ast_free_entry(soc, (struct dp_ast_entry *)ast_entry);
  679. qdf_spin_unlock_bh(&soc->ast_lock);
  680. }
  681. #endif
  682. static struct cdp_peer *dp_peer_ast_get_peer_wifi3(
  683. struct cdp_soc_t *soc_hdl,
  684. void *ast_entry_hdl)
  685. {
  686. return (struct cdp_peer *)((struct dp_ast_entry *)ast_entry_hdl)->peer;
  687. }
  688. static uint32_t dp_peer_ast_get_nexhop_peer_id_wifi3(
  689. struct cdp_soc_t *soc_hdl,
  690. void *ast_entry_hdl)
  691. {
  692. return ((struct dp_ast_entry *)ast_entry_hdl)->peer->peer_ids[0];
  693. }
  694. /**
* dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
  696. * @ring_num: ring num of the ring being queried
  697. * @grp_mask: the grp_mask array for the ring type in question.
  698. *
  699. * The grp_mask array is indexed by group number and the bit fields correspond
  700. * to ring numbers. We are finding which interrupt group a ring belongs to.
  701. *
  702. * Return: the index in the grp_mask array with the ring number.
  703. * -QDF_STATUS_E_NOENT if no entry is found
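*
* Example (illustrative masks): with grp_mask = {0x1, 0x6, 0x8} and
* ring_num = 2, the ring bit is 0x4, which matches grp_mask[1], so the
* function returns ext group 1.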
  704. */
  705. static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
  706. {
  707. int ext_group_num;
  708. int mask = 1 << ring_num;
  709. for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
  710. ext_group_num++) {
  711. if (mask & grp_mask[ext_group_num])
  712. return ext_group_num;
  713. }
  714. return -QDF_STATUS_E_NOENT;
  715. }
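/**
* dp_srng_calculate_msi_group() - map a ring to its interrupt ext group
* @soc: DP SOC handle
* @ring_type: SRNG ring type
* @ring_num: ring number within the ring type
*
* Selects the wlan_cfg group-mask array that covers the given ring type
* (WBM2SW_RELEASE ring 3 is treated as the Rx WBM release ring) and then
* looks up the ext group via dp_srng_find_ring_in_mask().
*
* Return: ext group number, or -QDF_STATUS_E_NOENT for rings that are not
* serviced through an ext group (e.g. SW-to-HW and CE rings).
*/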
  716. static int dp_srng_calculate_msi_group(struct dp_soc *soc,
  717. enum hal_ring_type ring_type,
  718. int ring_num)
  719. {
  720. int *grp_mask;
  721. switch (ring_type) {
  722. case WBM2SW_RELEASE:
  723. /* dp_tx_comp_handler - soc->tx_comp_ring */
  724. if (ring_num < 3)
  725. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  726. /* dp_rx_wbm_err_process - soc->rx_rel_ring */
  727. else if (ring_num == 3) {
  728. /* sw treats this as a separate ring type */
  729. grp_mask = &soc->wlan_cfg_ctx->
  730. int_rx_wbm_rel_ring_mask[0];
  731. ring_num = 0;
  732. } else {
  733. qdf_assert(0);
  734. return -QDF_STATUS_E_NOENT;
  735. }
  736. break;
  737. case REO_EXCEPTION:
  738. /* dp_rx_err_process - &soc->reo_exception_ring */
  739. grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
  740. break;
  741. case REO_DST:
  742. /* dp_rx_process - soc->reo_dest_ring */
  743. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  744. break;
  745. case REO_STATUS:
  746. /* dp_reo_status_ring_handler - soc->reo_status_ring */
  747. grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
  748. break;
  749. /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
  750. case RXDMA_MONITOR_STATUS:
  751. /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
  752. case RXDMA_MONITOR_DST:
  753. /* dp_mon_process */
  754. grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
  755. break;
  756. case RXDMA_DST:
  757. /* dp_rxdma_err_process */
  758. grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
  759. break;
  760. case RXDMA_BUF:
  761. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  762. break;
  763. case RXDMA_MONITOR_BUF:
  764. /* TODO: support low_thresh interrupt */
  765. return -QDF_STATUS_E_NOENT;
  766. break;
  767. case TCL_DATA:
  768. case TCL_CMD:
  769. case REO_CMD:
  770. case SW2WBM_RELEASE:
  771. case WBM_IDLE_LINK:
  772. /* normally empty SW_TO_HW rings */
  773. return -QDF_STATUS_E_NOENT;
  774. break;
  775. case TCL_STATUS:
  776. case REO_REINJECT:
  777. /* misc unused rings */
  778. return -QDF_STATUS_E_NOENT;
  779. break;
  780. case CE_SRC:
  781. case CE_DST:
  782. case CE_DST_STATUS:
  783. /* CE_rings - currently handled by hif */
  784. default:
  785. return -QDF_STATUS_E_NOENT;
  786. break;
  787. }
  788. return dp_srng_find_ring_in_mask(ring_num, grp_mask);
  789. }
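/**
* dp_srng_msi_setup() - fill MSI address/data for a ring in the SRNG params
* @soc: DP SOC handle
* @ring_params: SRNG parameters to be passed to hal_srng_setup()
* @ring_type: SRNG ring type
* @ring_num: ring number within the ring type
*
* Queries the platform MSI assignment for "DP" and, if the ring belongs to
* an interrupt ext group, programs msi_addr and msi_data and sets
* HAL_SRNG_MSI_INTR; otherwise the MSI fields are left cleared. MSI data
* values are shared round-robin across groups: for example (illustrative
* numbers), with msi_data_count = 4, msi_data_start = 1 and
* msi_group_number = 5, the ring is programmed with
* msi_data = (5 % 4) + 1 = 2.
*
* Return: none
*/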
  790. static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
  791. *ring_params, int ring_type, int ring_num)
  792. {
  793. int msi_group_number;
  794. int msi_data_count;
  795. int ret;
  796. uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
  797. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  798. &msi_data_count, &msi_data_start,
  799. &msi_irq_start);
  800. if (ret)
  801. return;
  802. msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
  803. ring_num);
  804. if (msi_group_number < 0) {
  805. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  806. FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
  807. ring_type, ring_num);
  808. ring_params->msi_addr = 0;
  809. ring_params->msi_data = 0;
  810. return;
  811. }
  812. if (msi_group_number > msi_data_count) {
  813. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  814. FL("2 msi_groups will share an msi; msi_group_num %d"),
  815. msi_group_number);
  816. QDF_ASSERT(0);
  817. }
  818. pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
  819. ring_params->msi_addr = addr_low;
  820. ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
  821. ring_params->msi_data = (msi_group_number % msi_data_count)
  822. + msi_data_start;
  823. ring_params->flags |= HAL_SRNG_MSI_INTR;
  824. }
  825. /**
  826. * dp_print_ast_stats() - Dump AST table contents
  827. * @soc: Datapath soc handle
  828. *
* Return: void
  830. */
  831. #ifdef FEATURE_AST
  832. static void dp_print_ast_stats(struct dp_soc *soc)
  833. {
  834. uint8_t i;
  835. uint8_t num_entries = 0;
  836. struct dp_vdev *vdev;
  837. struct dp_pdev *pdev;
  838. struct dp_peer *peer;
  839. struct dp_ast_entry *ase, *tmp_ase;
  840. char type[CDP_TXRX_AST_TYPE_MAX][10] = {
  841. "NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
  842. "DA", "HMWDS_SEC"};
  843. DP_PRINT_STATS("AST Stats:");
  844. DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
  845. DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
  846. DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
  847. DP_PRINT_STATS("AST Table:");
  848. qdf_spin_lock_bh(&soc->ast_lock);
  849. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  850. pdev = soc->pdev_list[i];
  851. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  852. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  853. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  854. DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
  855. DP_PRINT_STATS("%6d mac_addr = %pM"
  856. " peer_mac_addr = %pM"
  857. " type = %s"
  858. " next_hop = %d"
  859. " is_active = %d"
  860. " is_bss = %d"
  861. " ast_idx = %d"
  862. " ast_hash = %d"
  863. " pdev_id = %d"
  864. " vdev_id = %d"
  865. " del_cmd_sent = %d",
  866. ++num_entries,
  867. ase->mac_addr.raw,
  868. ase->peer->mac_addr.raw,
  869. type[ase->type],
  870. ase->next_hop,
  871. ase->is_active,
  872. ase->is_bss,
  873. ase->ast_idx,
  874. ase->ast_hash_value,
  875. ase->pdev_id,
  876. ase->vdev_id,
  877. ase->del_cmd_sent);
  878. }
  879. }
  880. }
  881. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  882. }
  883. qdf_spin_unlock_bh(&soc->ast_lock);
  884. }
  885. #else
  886. static void dp_print_ast_stats(struct dp_soc *soc)
  887. {
DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
  889. return;
  890. }
  891. #endif
  892. /**
  893. * dp_print_peer_table() - Dump all Peer stats
  894. * @vdev: Datapath Vdev handle
  895. *
* Return: void
  897. */
  898. static void dp_print_peer_table(struct dp_vdev *vdev)
  899. {
  900. struct dp_peer *peer = NULL;
  901. DP_PRINT_STATS("Dumping Peer Table Stats:");
  902. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  903. if (!peer) {
  904. DP_PRINT_STATS("Invalid Peer");
  905. return;
  906. }
  907. DP_PRINT_STATS(" peer_mac_addr = %pM nawds_enabled = %d",
  908. peer->mac_addr.raw,
  909. peer->nawds_enabled);
  910. DP_PRINT_STATS(" bss_peer = %d wapi = %d wds_enabled = %d",
  911. peer->bss_peer,
  912. peer->wapi,
  913. peer->wds_enabled);
  914. DP_PRINT_STATS(" delete in progress = %d peer id = %d",
  915. peer->delete_in_progress,
  916. peer->peer_ids[0]);
  917. }
  918. }
  919. /*
* dp_srng_setup() - Internal function to set up SRNG rings used by the data path
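* @soc: DP SOC handle
* @srng: SRNG descriptor to populate
* @ring_type: SRNG ring type
* @ring_num: ring number within the ring type
* @mac_id: mac id, used for per-pdev rings
* @num_entries: requested number of entries (capped to the HAL maximum)
*
* Return: 0 on completion, QDF_STATUS_E_NOMEM if the ring memory allocation fails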
  921. */
  922. static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
  923. int ring_type, int ring_num, int mac_id, uint32_t num_entries)
  924. {
  925. void *hal_soc = soc->hal_soc;
  926. uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
  927. /* TODO: See if we should get align size from hal */
  928. uint32_t ring_base_align = 8;
  929. struct hal_srng_params ring_params;
  930. uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
  931. /* TODO: Currently hal layer takes care of endianness related settings.
  932. * See if these settings need to passed from DP layer
  933. */
  934. ring_params.flags = 0;
  935. num_entries = (num_entries > max_entries) ? max_entries : num_entries;
  936. srng->hal_srng = NULL;
  937. srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
  938. srng->num_entries = num_entries;
  939. if (!soc->dp_soc_reinit) {
  940. srng->base_vaddr_unaligned =
  941. qdf_mem_alloc_consistent(soc->osdev,
  942. soc->osdev->dev,
  943. srng->alloc_size,
  944. &srng->base_paddr_unaligned);
  945. }
  946. if (!srng->base_vaddr_unaligned) {
  947. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  948. FL("alloc failed - ring_type: %d, ring_num %d"),
  949. ring_type, ring_num);
  950. return QDF_STATUS_E_NOMEM;
  951. }
  952. ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
  953. ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
  954. ring_params.ring_base_paddr = srng->base_paddr_unaligned +
  955. ((unsigned long)(ring_params.ring_base_vaddr) -
  956. (unsigned long)srng->base_vaddr_unaligned);
  957. ring_params.num_entries = num_entries;
  958. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  959. FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
  960. ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
  961. (void *)ring_params.ring_base_paddr, ring_params.num_entries);
  962. if (soc->intr_mode == DP_INTR_MSI) {
  963. dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
  964. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  965. FL("Using MSI for ring_type: %d, ring_num %d"),
  966. ring_type, ring_num);
  967. } else {
  968. ring_params.msi_data = 0;
  969. ring_params.msi_addr = 0;
  970. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  971. FL("Skipping MSI for ring_type: %d, ring_num %d"),
  972. ring_type, ring_num);
  973. }
  974. /*
  975. * Setup interrupt timer and batch counter thresholds for
  976. * interrupt mitigation based on ring type
  977. */
  978. if (ring_type == REO_DST) {
  979. ring_params.intr_timer_thres_us =
  980. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  981. ring_params.intr_batch_cntr_thres_entries =
  982. wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
  983. } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
  984. ring_params.intr_timer_thres_us =
  985. wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
  986. ring_params.intr_batch_cntr_thres_entries =
  987. wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
  988. } else {
  989. ring_params.intr_timer_thres_us =
  990. wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
  991. ring_params.intr_batch_cntr_thres_entries =
  992. wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
  993. }
  994. /* Enable low threshold interrupts for rx buffer rings (regular and
* monitor buffer rings).
  996. * TODO: See if this is required for any other ring
  997. */
  998. if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
  999. (ring_type == RXDMA_MONITOR_STATUS)) {
  1000. /* TODO: Setting low threshold to 1/8th of ring size
  1001. * see if this needs to be configurable
  1002. */
  1003. ring_params.low_threshold = num_entries >> 3;
  1004. ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
  1005. ring_params.intr_timer_thres_us =
  1006. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  1007. ring_params.intr_batch_cntr_thres_entries = 0;
  1008. }
  1009. srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
  1010. mac_id, &ring_params);
  1011. if (!srng->hal_srng) {
  1012. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1013. srng->alloc_size,
  1014. srng->base_vaddr_unaligned,
  1015. srng->base_paddr_unaligned, 0);
  1016. }
  1017. return 0;
  1018. }
  1019. /*
  1020. * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
  1021. * @soc: DP SOC handle
  1022. * @srng: source ring structure
  1023. * @ring_type: type of ring
  1024. * @ring_num: ring number
  1025. *
  1026. * Return: None
  1027. */
  1028. static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
  1029. int ring_type, int ring_num)
  1030. {
  1031. }
  1032. /**
  1033. * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
  1034. * Any buffers allocated and attached to ring entries are expected to be freed
  1035. * before calling this function.
  1036. */
  1037. static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
  1038. int ring_type, int ring_num)
  1039. {
  1040. if (!srng->hal_srng) {
  1041. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1042. FL("Ring type: %d, num:%d not setup"),
  1043. ring_type, ring_num);
  1044. return;
  1045. }
  1046. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  1047. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1048. srng->alloc_size,
  1049. srng->base_vaddr_unaligned,
  1050. srng->base_paddr_unaligned, 0);
  1051. srng->hal_srng = NULL;
  1052. }
  1053. /* TODO: Need this interface from HIF */
  1054. void *hif_get_hal_handle(void *hif_handle);
  1055. /*
  1056. * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
  1057. * @dp_ctx: DP SOC handle
  1058. * @budget: Number of frames/descriptors that can be processed in one shot
  1059. *
  1060. * Return: remaining budget/quota for the soc device
  1061. */
  1062. static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
  1063. {
  1064. struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
  1065. struct dp_soc *soc = int_ctx->soc;
  1066. int ring = 0;
  1067. uint32_t work_done = 0;
  1068. int budget = dp_budget;
  1069. uint8_t tx_mask = int_ctx->tx_ring_mask;
  1070. uint8_t rx_mask = int_ctx->rx_ring_mask;
  1071. uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
  1072. uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
  1073. uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
  1074. uint32_t remaining_quota = dp_budget;
  1075. struct dp_pdev *pdev = NULL;
  1076. int mac_id;
  1077. /* Process Tx completion interrupts first to return back buffers */
  1078. while (tx_mask) {
  1079. if (tx_mask & 0x1) {
  1080. work_done = dp_tx_comp_handler(soc,
  1081. soc->tx_comp_ring[ring].hal_srng,
  1082. remaining_quota);
  1083. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1084. "tx mask 0x%x ring %d, budget %d, work_done %d",
  1085. tx_mask, ring, budget, work_done);
  1086. budget -= work_done;
  1087. if (budget <= 0)
  1088. goto budget_done;
  1089. remaining_quota = budget;
  1090. }
  1091. tx_mask = tx_mask >> 1;
  1092. ring++;
  1093. }
  1094. /* Process REO Exception ring interrupt */
  1095. if (rx_err_mask) {
  1096. work_done = dp_rx_err_process(soc,
  1097. soc->reo_exception_ring.hal_srng,
  1098. remaining_quota);
  1099. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1100. "REO Exception Ring: work_done %d budget %d",
  1101. work_done, budget);
  1102. budget -= work_done;
  1103. if (budget <= 0) {
  1104. goto budget_done;
  1105. }
  1106. remaining_quota = budget;
  1107. }
  1108. /* Process Rx WBM release ring interrupt */
  1109. if (rx_wbm_rel_mask) {
  1110. work_done = dp_rx_wbm_err_process(soc,
  1111. soc->rx_rel_ring.hal_srng, remaining_quota);
  1112. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1113. "WBM Release Ring: work_done %d budget %d",
  1114. work_done, budget);
  1115. budget -= work_done;
  1116. if (budget <= 0) {
  1117. goto budget_done;
  1118. }
  1119. remaining_quota = budget;
  1120. }
  1121. /* Process Rx interrupts */
  1122. if (rx_mask) {
  1123. for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
  1124. if (rx_mask & (1 << ring)) {
  1125. work_done = dp_rx_process(int_ctx,
  1126. soc->reo_dest_ring[ring].hal_srng,
  1127. ring,
  1128. remaining_quota);
  1129. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1130. "rx mask 0x%x ring %d, work_done %d budget %d",
  1131. rx_mask, ring, work_done, budget);
  1132. budget -= work_done;
  1133. if (budget <= 0)
  1134. goto budget_done;
  1135. remaining_quota = budget;
  1136. }
  1137. }
  1138. }
  1139. if (reo_status_mask)
  1140. dp_reo_status_ring_handler(soc);
  1141. /* Process LMAC interrupts */
  1142. for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
  1143. pdev = soc->pdev_list[ring];
  1144. if (pdev == NULL)
  1145. continue;
  1146. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  1147. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  1148. pdev->pdev_id);
  1149. if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
  1150. work_done = dp_mon_process(soc, mac_for_pdev,
  1151. remaining_quota);
  1152. budget -= work_done;
  1153. if (budget <= 0)
  1154. goto budget_done;
  1155. remaining_quota = budget;
  1156. }
  1157. if (int_ctx->rxdma2host_ring_mask &
  1158. (1 << mac_for_pdev)) {
  1159. work_done = dp_rxdma_err_process(soc,
  1160. mac_for_pdev,
  1161. remaining_quota);
  1162. budget -= work_done;
  1163. if (budget <= 0)
  1164. goto budget_done;
  1165. remaining_quota = budget;
  1166. }
  1167. if (int_ctx->host2rxdma_ring_mask &
  1168. (1 << mac_for_pdev)) {
  1169. union dp_rx_desc_list_elem_t *desc_list = NULL;
  1170. union dp_rx_desc_list_elem_t *tail = NULL;
  1171. struct dp_srng *rx_refill_buf_ring =
  1172. &pdev->rx_refill_buf_ring;
  1173. DP_STATS_INC(pdev, replenish.low_thresh_intrs,
  1174. 1);
  1175. dp_rx_buffers_replenish(soc, mac_for_pdev,
  1176. rx_refill_buf_ring,
  1177. &soc->rx_desc_buf[mac_for_pdev], 0,
  1178. &desc_list, &tail);
  1179. }
  1180. }
  1181. }
  1182. qdf_lro_flush(int_ctx->lro_ctx);
  1183. budget_done:
  1184. return dp_budget - budget;
  1185. }
/* dp_interrupt_timer() - timer poll for interrupts
  1187. *
  1188. * @arg: SoC Handle
  1189. *
* Return: none
  1191. *
  1192. */
  1193. static void dp_interrupt_timer(void *arg)
  1194. {
  1195. struct dp_soc *soc = (struct dp_soc *) arg;
  1196. int i;
  1197. if (qdf_atomic_read(&soc->cmn_init_done)) {
  1198. for (i = 0;
  1199. i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
  1200. dp_service_srngs(&soc->intr_ctx[i], 0xffff);
  1201. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  1202. }
  1203. }
  1204. /*
  1205. * dp_soc_attach_poll() - Register handlers for DP interrupts
  1206. * @txrx_soc: DP SOC handle
  1207. *
* The host driver registers "DP_NUM_INTERRUPT_CONTEXTS" NAPI contexts. Each
* NAPI context has a tx_ring_mask, rx_ring_mask, and rx_monitor_ring mask to
* indicate the rings that are processed by the handler.
  1211. *
  1212. * Return: 0 for success, nonzero for failure.
  1213. */
  1214. static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
  1215. {
  1216. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1217. int i;
  1218. soc->intr_mode = DP_INTR_POLL;
  1219. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1220. soc->intr_ctx[i].dp_intr_id = i;
  1221. soc->intr_ctx[i].tx_ring_mask =
  1222. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1223. soc->intr_ctx[i].rx_ring_mask =
  1224. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1225. soc->intr_ctx[i].rx_mon_ring_mask =
  1226. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
  1227. soc->intr_ctx[i].rx_err_ring_mask =
  1228. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1229. soc->intr_ctx[i].rx_wbm_rel_ring_mask =
  1230. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1231. soc->intr_ctx[i].reo_status_ring_mask =
  1232. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1233. soc->intr_ctx[i].rxdma2host_ring_mask =
  1234. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1235. soc->intr_ctx[i].soc = soc;
  1236. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1237. }
  1238. qdf_timer_init(soc->osdev, &soc->int_timer,
  1239. dp_interrupt_timer, (void *)soc,
  1240. QDF_TIMER_TYPE_WAKE_APPS);
  1241. return QDF_STATUS_SUCCESS;
  1242. }
  1243. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
  1244. #if defined(CONFIG_MCL)
  1245. /*
  1246. * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
  1247. * @txrx_soc: DP SOC handle
  1248. *
  1249. * Call the appropriate attach function based on the mode of operation.
  1250. * This is a WAR for enabling monitor mode.
  1251. *
  1252. * Return: 0 for success. nonzero for failure.
  1253. */
  1254. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1255. {
  1256. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1257. if (!(soc->wlan_cfg_ctx->napi_enabled) ||
  1258. con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
  1259. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1260. "%s: Poll mode", __func__);
  1261. return dp_soc_attach_poll(txrx_soc);
  1262. } else {
  1263. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1264. "%s: Interrupt mode", __func__);
  1265. return dp_soc_interrupt_attach(txrx_soc);
  1266. }
  1267. }
  1268. #else
  1269. #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
  1270. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1271. {
  1272. return dp_soc_attach_poll(txrx_soc);
  1273. }
  1274. #else
  1275. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1276. {
  1277. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1278. if (hif_is_polled_mode_enabled(soc->hif_handle))
  1279. return dp_soc_attach_poll(txrx_soc);
  1280. else
  1281. return dp_soc_interrupt_attach(txrx_soc);
  1282. }
  1283. #endif
  1284. #endif
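/**
* dp_soc_interrupt_map_calculate_integrated() - build the irq map for
* integrated (non-MSI) interrupts
* @soc: DP SOC handle
* @intr_ctx_num: interrupt context number
* @irq_id_map: output array of IRQ ids serviced by this interrupt context
* @num_irq_r: output count of entries filled into @irq_id_map
*
* Walks the ring masks configured for this interrupt context and translates
* each enabled ring into the corresponding legacy interrupt id.
*
* Return: none
*/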
  1285. static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
  1286. int intr_ctx_num, int *irq_id_map, int *num_irq_r)
  1287. {
  1288. int j;
  1289. int num_irq = 0;
  1290. int tx_mask =
  1291. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1292. int rx_mask =
  1293. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1294. int rx_mon_mask =
  1295. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1296. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1297. soc->wlan_cfg_ctx, intr_ctx_num);
  1298. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1299. soc->wlan_cfg_ctx, intr_ctx_num);
  1300. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1301. soc->wlan_cfg_ctx, intr_ctx_num);
  1302. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1303. soc->wlan_cfg_ctx, intr_ctx_num);
  1304. int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
  1305. soc->wlan_cfg_ctx, intr_ctx_num);
  1306. int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
  1307. soc->wlan_cfg_ctx, intr_ctx_num);
  1308. for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
  1309. if (tx_mask & (1 << j)) {
  1310. irq_id_map[num_irq++] =
  1311. (wbm2host_tx_completions_ring1 - j);
  1312. }
  1313. if (rx_mask & (1 << j)) {
  1314. irq_id_map[num_irq++] =
  1315. (reo2host_destination_ring1 - j);
  1316. }
  1317. if (rxdma2host_ring_mask & (1 << j)) {
  1318. irq_id_map[num_irq++] =
  1319. rxdma2host_destination_ring_mac1 -
  1320. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1321. }
  1322. if (host2rxdma_ring_mask & (1 << j)) {
  1323. irq_id_map[num_irq++] =
  1324. host2rxdma_host_buf_ring_mac1 -
  1325. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1326. }
  1327. if (host2rxdma_mon_ring_mask & (1 << j)) {
  1328. irq_id_map[num_irq++] =
  1329. host2rxdma_monitor_ring1 -
  1330. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1331. }
  1332. if (rx_mon_mask & (1 << j)) {
  1333. irq_id_map[num_irq++] =
  1334. ppdu_end_interrupts_mac1 -
  1335. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1336. irq_id_map[num_irq++] =
  1337. rxdma2host_monitor_status_ring_mac1 -
  1338. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1339. }
  1340. if (rx_wbm_rel_ring_mask & (1 << j))
  1341. irq_id_map[num_irq++] = wbm2host_rx_release;
  1342. if (rx_err_ring_mask & (1 << j))
  1343. irq_id_map[num_irq++] = reo2host_exception;
  1344. if (reo_status_ring_mask & (1 << j))
  1345. irq_id_map[num_irq++] = reo2host_status;
  1346. }
  1347. *num_irq_r = num_irq;
  1348. }
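/**
* dp_soc_interrupt_map_calculate_msi() - build the irq map for MSI interrupts
* @soc: DP SOC handle
* @intr_ctx_num: interrupt context number
* @irq_id_map: output array of IRQ ids
* @num_irq_r: output count of entries filled into @irq_id_map
* @msi_vector_count: number of MSI vectors available for DP
* @msi_vector_start: first MSI vector assigned to DP
*
* Sets soc->intr_mode to DP_INTR_MSI and shares the available vectors
* round-robin across interrupt contexts:
* vector = (intr_ctx_num % msi_vector_count) + msi_vector_start.
* For example (illustrative numbers), with msi_vector_count = 3 and
* msi_vector_start = 1, contexts 0..5 map to vectors 1, 2, 3, 1, 2, 3.
*
* Return: none
*/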
  1349. static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
  1350. int intr_ctx_num, int *irq_id_map, int *num_irq_r,
  1351. int msi_vector_count, int msi_vector_start)
  1352. {
  1353. int tx_mask = wlan_cfg_get_tx_ring_mask(
  1354. soc->wlan_cfg_ctx, intr_ctx_num);
  1355. int rx_mask = wlan_cfg_get_rx_ring_mask(
  1356. soc->wlan_cfg_ctx, intr_ctx_num);
  1357. int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
  1358. soc->wlan_cfg_ctx, intr_ctx_num);
  1359. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1360. soc->wlan_cfg_ctx, intr_ctx_num);
  1361. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1362. soc->wlan_cfg_ctx, intr_ctx_num);
  1363. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1364. soc->wlan_cfg_ctx, intr_ctx_num);
  1365. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1366. soc->wlan_cfg_ctx, intr_ctx_num);
  1367. unsigned int vector =
  1368. (intr_ctx_num % msi_vector_count) + msi_vector_start;
  1369. int num_irq = 0;
  1370. soc->intr_mode = DP_INTR_MSI;
  1371. if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
  1372. rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
  1373. irq_id_map[num_irq++] =
  1374. pld_get_msi_irq(soc->osdev->dev, vector);
  1375. *num_irq_r = num_irq;
  1376. }
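/**
* dp_soc_interrupt_map_calculate() - build the irq map for an interrupt context
* @soc: DP SOC handle
* @intr_ctx_num: interrupt context number
* @irq_id_map: output array of IRQ ids
* @num_irq: output count of IRQ ids
*
* Uses the MSI-based map when pld_get_user_msi_assignment() reports an MSI
* assignment for "DP", and falls back to the integrated (legacy) map
* otherwise.
*
* Return: none
*/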
  1377. static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
  1378. int *irq_id_map, int *num_irq)
  1379. {
  1380. int msi_vector_count, ret;
  1381. uint32_t msi_base_data, msi_vector_start;
  1382. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  1383. &msi_vector_count,
  1384. &msi_base_data,
  1385. &msi_vector_start);
  1386. if (ret)
  1387. return dp_soc_interrupt_map_calculate_integrated(soc,
  1388. intr_ctx_num, irq_id_map, num_irq);
  1389. else
  1390. dp_soc_interrupt_map_calculate_msi(soc,
  1391. intr_ctx_num, irq_id_map, num_irq,
  1392. msi_vector_count, msi_vector_start);
  1393. }
  1394. /*
  1395. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  1396. * @txrx_soc: DP SOC handle
  1397. *
* The host driver registers "DP_NUM_INTERRUPT_CONTEXTS" NAPI contexts. Each
* NAPI context has a tx_ring_mask, rx_ring_mask, and rx_monitor_ring mask to
* indicate the rings that are processed by the handler.
  1401. *
  1402. * Return: 0 for success. nonzero for failure.
  1403. */
  1404. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  1405. {
  1406. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1407. int i = 0;
  1408. int num_irq = 0;
  1409. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1410. int ret = 0;
  1411. /* Map of IRQ ids registered with one interrupt context */
  1412. int irq_id_map[HIF_MAX_GRP_IRQ];
  1413. int tx_mask =
  1414. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1415. int rx_mask =
  1416. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1417. int rx_mon_mask =
  1418. dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
  1419. int rx_err_ring_mask =
  1420. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1421. int rx_wbm_rel_ring_mask =
  1422. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1423. int reo_status_ring_mask =
  1424. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1425. int rxdma2host_ring_mask =
  1426. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1427. int host2rxdma_ring_mask =
  1428. wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
  1429. int host2rxdma_mon_ring_mask =
  1430. wlan_cfg_get_host2rxdma_mon_ring_mask(
  1431. soc->wlan_cfg_ctx, i);
  1432. soc->intr_ctx[i].dp_intr_id = i;
  1433. soc->intr_ctx[i].tx_ring_mask = tx_mask;
  1434. soc->intr_ctx[i].rx_ring_mask = rx_mask;
  1435. soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
  1436. soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
  1437. soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
  1438. soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
  1439. soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
  1440. soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
  1441. soc->intr_ctx[i].host2rxdma_mon_ring_mask =
  1442. host2rxdma_mon_ring_mask;
  1443. soc->intr_ctx[i].soc = soc;
  1444. num_irq = 0;
  1445. dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
  1446. &num_irq);
  1447. ret = hif_register_ext_group(soc->hif_handle,
  1448. num_irq, irq_id_map, dp_service_srngs,
  1449. &soc->intr_ctx[i], "dp_intr",
  1450. HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
  1451. if (ret) {
  1452. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1453. FL("failed, ret = %d"), ret);
  1454. return QDF_STATUS_E_FAILURE;
  1455. }
  1456. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1457. }
  1458. hif_configure_ext_group_interrupts(soc->hif_handle);
  1459. return QDF_STATUS_SUCCESS;
  1460. }
  1461. /*
  1462. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  1463. * @txrx_soc: DP SOC handle
  1464. *
  1465. * Return: void
  1466. */
  1467. static void dp_soc_interrupt_detach(void *txrx_soc)
  1468. {
  1469. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1470. int i;
  1471. if (soc->intr_mode == DP_INTR_POLL) {
  1472. qdf_timer_stop(&soc->int_timer);
  1473. qdf_timer_free(&soc->int_timer);
  1474. } else {
  1475. hif_deregister_exec_group(soc->hif_handle, "dp_intr");
  1476. }
  1477. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1478. soc->intr_ctx[i].tx_ring_mask = 0;
  1479. soc->intr_ctx[i].rx_ring_mask = 0;
  1480. soc->intr_ctx[i].rx_mon_ring_mask = 0;
  1481. soc->intr_ctx[i].rx_err_ring_mask = 0;
  1482. soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
  1483. soc->intr_ctx[i].reo_status_ring_mask = 0;
  1484. soc->intr_ctx[i].rxdma2host_ring_mask = 0;
  1485. soc->intr_ctx[i].host2rxdma_ring_mask = 0;
  1486. soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
  1487. qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
  1488. }
  1489. }
  1490. #define AVG_MAX_MPDUS_PER_TID 128
  1491. #define AVG_TIDS_PER_CLIENT 2
  1492. #define AVG_FLOWS_PER_TID 2
  1493. #define AVG_MSDUS_PER_FLOW 128
  1494. #define AVG_MSDUS_PER_MPDU 4
  1495. /*
  1496. * Allocate and setup link descriptor pool that will be used by HW for
  1497. * various link and queue descriptors and managed by WBM
  1498. */
  1499. static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
  1500. {
  1501. int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
  1502. int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
  1503. uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
  1504. uint32_t num_mpdus_per_link_desc =
  1505. hal_num_mpdus_per_link_desc(soc->hal_soc);
  1506. uint32_t num_msdus_per_link_desc =
  1507. hal_num_msdus_per_link_desc(soc->hal_soc);
  1508. uint32_t num_mpdu_links_per_queue_desc =
  1509. hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
  1510. uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
  1511. uint32_t total_link_descs, total_mem_size;
  1512. uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
  1513. uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
  1514. uint32_t num_link_desc_banks;
  1515. uint32_t last_bank_size = 0;
  1516. uint32_t entry_size, num_entries;
  1517. int i;
  1518. uint32_t desc_id = 0;
  1519. qdf_dma_addr_t *baseaddr = NULL;
/* Only Tx queue descriptors are allocated from the common link descriptor
* pool. Rx queue descriptors (REO queue extension descriptors) are not
* included here because they are expected to be allocated contiguously
* with the REO queue descriptors.
  1524. */
  1525. num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1526. AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
  1527. num_mpdu_queue_descs = num_mpdu_link_descs /
  1528. num_mpdu_links_per_queue_desc;
  1529. num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1530. AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
  1531. num_msdus_per_link_desc;
  1532. num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1533. AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
  1534. num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
  1535. num_tx_msdu_link_descs + num_rx_msdu_link_descs;
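/* Illustrative sizing (hypothetical inputs): with max_clients = 64 and
* num_mpdus_per_link_desc = 6, num_mpdu_link_descs above works out to
* (64 * 2 * 128) / 6 = 2730 descriptors.
*/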
  1536. /* Round up to power of 2 */
  1537. total_link_descs = 1;
  1538. while (total_link_descs < num_entries)
  1539. total_link_descs <<= 1;
  1540. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1541. FL("total_link_descs: %u, link_desc_size: %d"),
  1542. total_link_descs, link_desc_size);
  1543. total_mem_size = total_link_descs * link_desc_size;
  1544. total_mem_size += link_desc_align;
  1545. if (total_mem_size <= max_alloc_size) {
  1546. num_link_desc_banks = 0;
  1547. last_bank_size = total_mem_size;
  1548. } else {
  1549. num_link_desc_banks = (total_mem_size) /
  1550. (max_alloc_size - link_desc_align);
  1551. last_bank_size = total_mem_size %
  1552. (max_alloc_size - link_desc_align);
  1553. }
  1554. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1555. FL("total_mem_size: %d, num_link_desc_banks: %u"),
  1556. total_mem_size, num_link_desc_banks);
  1557. for (i = 0; i < num_link_desc_banks; i++) {
  1558. if (!soc->dp_soc_reinit) {
  1559. baseaddr = &soc->link_desc_banks[i].
  1560. base_paddr_unaligned;
  1561. soc->link_desc_banks[i].base_vaddr_unaligned =
  1562. qdf_mem_alloc_consistent(soc->osdev,
  1563. soc->osdev->dev,
  1564. max_alloc_size,
  1565. baseaddr);
  1566. }
  1567. soc->link_desc_banks[i].size = max_alloc_size;
  1568. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
  1569. soc->link_desc_banks[i].base_vaddr_unaligned) +
  1570. ((unsigned long)(
  1571. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1572. link_desc_align));
  1573. soc->link_desc_banks[i].base_paddr = (unsigned long)(
  1574. soc->link_desc_banks[i].base_paddr_unaligned) +
  1575. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1576. (unsigned long)(
  1577. soc->link_desc_banks[i].base_vaddr_unaligned));
  1578. if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
  1579. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1580. FL("Link descriptor memory alloc failed"));
  1581. goto fail;
  1582. }
  1583. }
  1584. if (last_bank_size) {
/* Allocate the last bank in case the total memory required is not an
* exact multiple of max_alloc_size
  1587. */
  1588. if (!soc->dp_soc_reinit) {
  1589. baseaddr = &soc->link_desc_banks[i].
  1590. base_paddr_unaligned;
  1591. soc->link_desc_banks[i].base_vaddr_unaligned =
  1592. qdf_mem_alloc_consistent(soc->osdev,
  1593. soc->osdev->dev,
  1594. last_bank_size,
  1595. baseaddr);
  1596. }
  1597. soc->link_desc_banks[i].size = last_bank_size;
  1598. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
  1599. (soc->link_desc_banks[i].base_vaddr_unaligned) +
  1600. ((unsigned long)(
  1601. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1602. link_desc_align));
  1603. soc->link_desc_banks[i].base_paddr =
  1604. (unsigned long)(
  1605. soc->link_desc_banks[i].base_paddr_unaligned) +
  1606. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1607. (unsigned long)(
  1608. soc->link_desc_banks[i].base_vaddr_unaligned));
  1609. }
  1610. /* Allocate and setup link descriptor idle list for HW internal use */
  1611. entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
  1612. total_mem_size = entry_size * total_link_descs;
  1613. if (total_mem_size <= max_alloc_size) {
  1614. void *desc;
  1615. if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
  1616. WBM_IDLE_LINK, 0, 0, total_link_descs)) {
  1617. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1618. FL("Link desc idle ring setup failed"));
  1619. goto fail;
  1620. }
  1621. hal_srng_access_start_unlocked(soc->hal_soc,
  1622. soc->wbm_idle_link_ring.hal_srng);
  1623. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1624. soc->link_desc_banks[i].base_paddr; i++) {
  1625. uint32_t num_entries = (soc->link_desc_banks[i].size -
  1626. ((unsigned long)(
  1627. soc->link_desc_banks[i].base_vaddr) -
  1628. (unsigned long)(
  1629. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1630. / link_desc_size;
  1631. unsigned long paddr = (unsigned long)(
  1632. soc->link_desc_banks[i].base_paddr);
  1633. while (num_entries && (desc = hal_srng_src_get_next(
  1634. soc->hal_soc,
  1635. soc->wbm_idle_link_ring.hal_srng))) {
  1636. hal_set_link_desc_addr(desc,
  1637. LINK_DESC_COOKIE(desc_id, i), paddr);
  1638. num_entries--;
  1639. desc_id++;
  1640. paddr += link_desc_size;
  1641. }
  1642. }
  1643. hal_srng_access_end_unlocked(soc->hal_soc,
  1644. soc->wbm_idle_link_ring.hal_srng);
  1645. } else {
  1646. uint32_t num_scatter_bufs;
  1647. uint32_t num_entries_per_buf;
  1648. uint32_t rem_entries;
  1649. uint8_t *scatter_buf_ptr;
  1650. uint16_t scatter_buf_num;
  1651. uint32_t buf_size = 0;
  1652. soc->wbm_idle_scatter_buf_size =
  1653. hal_idle_list_scatter_buf_size(soc->hal_soc);
  1654. num_entries_per_buf = hal_idle_scatter_buf_num_entries(
  1655. soc->hal_soc, soc->wbm_idle_scatter_buf_size);
  1656. num_scatter_bufs = hal_idle_list_num_scatter_bufs(
  1657. soc->hal_soc, total_mem_size,
  1658. soc->wbm_idle_scatter_buf_size);
  1659. if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
  1660. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1661. FL("scatter bufs size out of bounds"));
  1662. goto fail;
  1663. }
  1664. for (i = 0; i < num_scatter_bufs; i++) {
  1665. baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
  1666. if (!soc->dp_soc_reinit) {
  1667. buf_size = soc->wbm_idle_scatter_buf_size;
  1668. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  1669. qdf_mem_alloc_consistent(soc->osdev,
  1670. soc->osdev->
  1671. dev,
  1672. buf_size,
  1673. baseaddr);
  1674. }
  1675. if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
  1676. QDF_TRACE(QDF_MODULE_ID_DP,
  1677. QDF_TRACE_LEVEL_ERROR,
  1678. FL("Scatter lst memory alloc fail"));
  1679. goto fail;
  1680. }
  1681. }
  1682. /* Populate idle list scatter buffers with link descriptor
  1683. * pointers
  1684. */
  1685. scatter_buf_num = 0;
  1686. scatter_buf_ptr = (uint8_t *)(
  1687. soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
  1688. rem_entries = num_entries_per_buf;
  1689. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1690. soc->link_desc_banks[i].base_paddr; i++) {
  1691. uint32_t num_link_descs =
  1692. (soc->link_desc_banks[i].size -
  1693. ((unsigned long)(
  1694. soc->link_desc_banks[i].base_vaddr) -
  1695. (unsigned long)(
  1696. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1697. / link_desc_size;
  1698. unsigned long paddr = (unsigned long)(
  1699. soc->link_desc_banks[i].base_paddr);
  1700. while (num_link_descs) {
  1701. hal_set_link_desc_addr((void *)scatter_buf_ptr,
  1702. LINK_DESC_COOKIE(desc_id, i), paddr);
  1703. num_link_descs--;
  1704. desc_id++;
  1705. paddr += link_desc_size;
  1706. rem_entries--;
  1707. if (rem_entries) {
  1708. scatter_buf_ptr += entry_size;
  1709. } else {
  1710. rem_entries = num_entries_per_buf;
  1711. scatter_buf_num++;
  1712. if (scatter_buf_num >= num_scatter_bufs)
  1713. break;
  1714. scatter_buf_ptr = (uint8_t *)(
  1715. soc->wbm_idle_scatter_buf_base_vaddr[
  1716. scatter_buf_num]);
  1717. }
  1718. }
  1719. }
  1720. /* Setup link descriptor idle list in HW */
  1721. hal_setup_link_idle_list(soc->hal_soc,
  1722. soc->wbm_idle_scatter_buf_base_paddr,
  1723. soc->wbm_idle_scatter_buf_base_vaddr,
  1724. num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
  1725. (uint32_t)(scatter_buf_ptr -
  1726. (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
  1727. scatter_buf_num-1])), total_link_descs);
  1728. }
  1729. return 0;
  1730. fail:
  1731. if (soc->wbm_idle_link_ring.hal_srng) {
  1732. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1733. WBM_IDLE_LINK, 0);
  1734. }
  1735. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1736. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1737. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1738. soc->wbm_idle_scatter_buf_size,
  1739. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1740. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1741. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  1742. }
  1743. }
  1744. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1745. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1746. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1747. soc->link_desc_banks[i].size,
  1748. soc->link_desc_banks[i].base_vaddr_unaligned,
  1749. soc->link_desc_banks[i].base_paddr_unaligned,
  1750. 0);
  1751. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  1752. }
  1753. }
  1754. return QDF_STATUS_E_FAILURE;
  1755. }
  1756. /*
* Free the link descriptor pool that was set up for HW use
  1758. */
  1759. static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
  1760. {
  1761. int i;
  1762. if (soc->wbm_idle_link_ring.hal_srng) {
  1763. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1764. WBM_IDLE_LINK, 0);
  1765. }
  1766. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1767. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1768. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1769. soc->wbm_idle_scatter_buf_size,
  1770. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1771. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1772. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  1773. }
  1774. }
  1775. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1776. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1777. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1778. soc->link_desc_banks[i].size,
  1779. soc->link_desc_banks[i].base_vaddr_unaligned,
  1780. soc->link_desc_banks[i].base_paddr_unaligned,
  1781. 0);
  1782. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  1783. }
  1784. }
  1785. }
  1786. #ifdef IPA_OFFLOAD
  1787. #define REO_DST_RING_SIZE_QCA6290 1023
  1788. #ifndef QCA_WIFI_QCA8074_VP
  1789. #define REO_DST_RING_SIZE_QCA8074 1023
  1790. #else
  1791. #define REO_DST_RING_SIZE_QCA8074 8
  1792. #endif /* QCA_WIFI_QCA8074_VP */
  1793. #else
  1794. #define REO_DST_RING_SIZE_QCA6290 1024
  1795. #ifndef QCA_WIFI_QCA8074_VP
  1796. #define REO_DST_RING_SIZE_QCA8074 2048
  1797. #else
  1798. #define REO_DST_RING_SIZE_QCA8074 8
  1799. #endif /* QCA_WIFI_QCA8074_VP */
  1800. #endif /* IPA_OFFLOAD */
  1801. /*
  1802. * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
* @soc_hdl: Datapath SOC handle
  1804. *
  1805. * This is a timer function used to age out stale AST nodes from
  1806. * AST table
  1807. */
  1808. #ifdef FEATURE_WDS
  1809. static void dp_ast_aging_timer_fn(void *soc_hdl)
  1810. {
  1811. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  1812. struct dp_pdev *pdev;
  1813. struct dp_vdev *vdev;
  1814. struct dp_peer *peer;
  1815. struct dp_ast_entry *ase, *temp_ase;
  1816. int i;
  1817. bool check_wds_ase = false;
  1818. if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
  1819. soc->wds_ast_aging_timer_cnt = 0;
  1820. check_wds_ase = true;
  1821. }
  1822. qdf_spin_lock_bh(&soc->ast_lock);
  1823. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  1824. pdev = soc->pdev_list[i];
  1825. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  1826. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  1827. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  1828. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  1829. /*
  1830. * Do not expire static ast entries
  1831. * and HM WDS entries
  1832. */
  1833. if (ase->type !=
  1834. CDP_TXRX_AST_TYPE_WDS &&
  1835. ase->type !=
  1836. CDP_TXRX_AST_TYPE_MEC &&
  1837. ase->type !=
  1838. CDP_TXRX_AST_TYPE_DA)
  1839. continue;
/* Expire MEC entries every n seconds. A MEC entry
* must be aged out in case the STA backbone is
* converted into an AP backbone; in that case it
* needs to be re-added as a WDS entry.
  1845. */
  1846. if (ase->is_active && ase->type ==
  1847. CDP_TXRX_AST_TYPE_MEC) {
  1848. ase->is_active = FALSE;
  1849. continue;
  1850. } else if (ase->is_active &&
  1851. check_wds_ase) {
  1852. ase->is_active = FALSE;
  1853. continue;
  1854. }
  1855. if (ase->type ==
  1856. CDP_TXRX_AST_TYPE_MEC) {
  1857. DP_STATS_INC(soc,
  1858. ast.aged_out, 1);
  1859. dp_peer_del_ast(soc, ase);
  1860. } else if (check_wds_ase) {
  1861. DP_STATS_INC(soc,
  1862. ast.aged_out, 1);
  1863. dp_peer_del_ast(soc, ase);
  1864. }
  1865. }
  1866. }
  1867. }
  1868. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1869. }
  1870. qdf_spin_unlock_bh(&soc->ast_lock);
  1871. if (qdf_atomic_read(&soc->cmn_init_done))
  1872. qdf_timer_mod(&soc->ast_aging_timer,
  1873. DP_AST_AGING_TIMER_DEFAULT_MS);
  1874. }
  1875. /*
  1876. * dp_soc_wds_attach() - Setup WDS timer and AST table
  1877. * @soc: Datapath SOC handle
  1878. *
  1879. * Return: None
  1880. */
  1881. static void dp_soc_wds_attach(struct dp_soc *soc)
  1882. {
  1883. soc->wds_ast_aging_timer_cnt = 0;
  1884. qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
  1885. dp_ast_aging_timer_fn, (void *)soc,
  1886. QDF_TIMER_TYPE_WAKE_APPS);
  1887. qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
  1888. }
  1889. /*
  1890. * dp_soc_wds_detach() - Detach WDS data structures and timers
* @soc: Datapath SOC handle
  1892. *
  1893. * Return: None
  1894. */
  1895. static void dp_soc_wds_detach(struct dp_soc *soc)
  1896. {
  1897. qdf_timer_stop(&soc->ast_aging_timer);
  1898. qdf_timer_free(&soc->ast_aging_timer);
  1899. }
  1900. #else
  1901. static void dp_soc_wds_attach(struct dp_soc *soc)
  1902. {
  1903. }
  1904. static void dp_soc_wds_detach(struct dp_soc *soc)
  1905. {
  1906. }
  1907. #endif
  1908. /*
* dp_soc_reset_cpu_ring_map() - Reset the CPU ring map
* @soc: Datapath soc handle
*
* This API resets the default CPU ring map
  1913. */
  1914. static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
  1915. {
  1916. uint8_t i;
  1917. int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1918. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  1919. switch (nss_config) {
  1920. case dp_nss_cfg_first_radio:
  1921. /*
  1922. * Setting Tx ring map for one nss offloaded radio
  1923. */
  1924. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
  1925. break;
  1926. case dp_nss_cfg_second_radio:
  1927. /*
  1928. * Setting Tx ring for two nss offloaded radios
  1929. */
  1930. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
  1931. break;
  1932. case dp_nss_cfg_dbdc:
  1933. /*
  1934. * Setting Tx ring map for 2 nss offloaded radios
  1935. */
  1936. soc->tx_ring_map[i] =
  1937. dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
  1938. break;
  1939. case dp_nss_cfg_dbtc:
  1940. /*
  1941. * Setting Tx ring map for 3 nss offloaded radios
  1942. */
  1943. soc->tx_ring_map[i] =
  1944. dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
  1945. break;
  1946. default:
  1947. dp_err("tx_ring_map failed due to invalid nss cfg");
  1948. break;
  1949. }
  1950. }
  1951. }
  1952. /*
  1953. * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
* @soc: DP soc handle
* @ring_type: ring type
* @ring_num: ring number
*
* Return: non-zero if the ring is offloaded to NSS, 0 otherwise
  1959. */
  1960. static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
  1961. {
  1962. uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1963. uint8_t status = 0;
  1964. switch (ring_type) {
  1965. case WBM2SW_RELEASE:
  1966. case REO_DST:
  1967. case RXDMA_BUF:
  1968. status = ((nss_config) & (1 << ring_num));
  1969. break;
  1970. default:
  1971. break;
  1972. }
  1973. return status;
  1974. }
  1975. /*
  1976. * dp_soc_reset_intr_mask() - reset interrupt mask
* @soc: DP SOC handle
*
* Return: void
  1980. */
  1981. static void dp_soc_reset_intr_mask(struct dp_soc *soc)
  1982. {
  1983. uint8_t j;
  1984. int *grp_mask = NULL;
  1985. int group_number, mask, num_ring;
  1986. /* number of tx ring */
  1987. num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  1988. /*
  1989. * group mask for tx completion ring.
  1990. */
  1991. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  1992. /* loop and reset the mask for only offloaded ring */
  1993. for (j = 0; j < num_ring; j++) {
  1994. if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
  1995. continue;
  1996. }
  1997. /*
  1998. * Group number corresponding to tx offloaded ring.
  1999. */
  2000. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2001. if (group_number < 0) {
  2002. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2003. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2004. WBM2SW_RELEASE, j);
  2005. return;
  2006. }
  2007. /* reset the tx mask for offloaded ring */
  2008. mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2009. mask &= (~(1 << j));
  2010. /*
  2011. * reset the interrupt mask for offloaded ring.
  2012. */
  2013. wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2014. }
  2015. /* number of rx rings */
  2016. num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  2017. /*
  2018. * group mask for reo destination ring.
  2019. */
  2020. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  2021. /* loop and reset the mask for only offloaded ring */
  2022. for (j = 0; j < num_ring; j++) {
  2023. if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
  2024. continue;
  2025. }
  2026. /*
  2027. * Group number corresponding to rx offloaded ring.
  2028. */
  2029. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2030. if (group_number < 0) {
  2031. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2032. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2033. REO_DST, j);
  2034. return;
  2035. }
2036. /* reset the rx interrupt mask for the offloaded ring */
  2037. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2038. mask &= (~(1 << j));
  2039. /*
  2040. * set the interrupt mask to zero for rx offloaded radio.
  2041. */
  2042. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2043. }
  2044. /*
  2045. * group mask for Rx buffer refill ring
  2046. */
  2047. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  2048. /* loop and reset the mask for only offloaded ring */
  2049. for (j = 0; j < MAX_PDEV_CNT; j++) {
  2050. if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
  2051. continue;
  2052. }
  2053. /*
  2054. * Group number corresponding to rx offloaded ring.
  2055. */
  2056. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2057. if (group_number < 0) {
  2058. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2059. FL("ring not part of any group; ring_type: %d,ring_num %d"),
2060. RXDMA_BUF, j);
  2061. return;
  2062. }
2063. /* reset the host2rxdma interrupt mask for the offloaded ring */
  2064. mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2065. group_number);
  2066. mask &= (~(1 << j));
  2067. /*
  2068. * set the interrupt mask to zero for rx offloaded radio.
  2069. */
  2070. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2071. group_number, mask);
  2072. }
  2073. }
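/*
 * Illustrative sketch (not part of the original source): if interrupt group
 * 'g' carried tx ring mask 0x7 (rings 0..2) and ring 1 belongs to an NSS
 * offloaded radio, the loop above leaves the group with 0x5:
 *
 *	mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, g);   // 0x7
 *	mask &= ~(1 << 1);                                         // 0x5
 *	wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, g, mask);
 *
 * so the host no longer takes completions for the offloaded TCL ring.
 */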
  2074. #ifdef IPA_OFFLOAD
  2075. /**
2076. * dp_reo_remap_config() - configure REO remap register value based
2077. * on the nss configuration.
2078. * Based on the offload_radio value, the remap configuration
2079. * below gets applied:
2080. * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2081. * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2082. * 2 - 2nd Radio handled by NSS (remap rings 1, 3 & 4)
2083. * 3 - both Radios handled by NSS (remap not required)
2084. * 4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
2085. * @soc: DP SOC handle
  2086. * @remap1: output parameter indicates reo remap 1 register value
  2087. * @remap2: output parameter indicates reo remap 2 register value
  2088. * Return: bool type, true if remap is configured else false.
  2089. */
  2090. static bool dp_reo_remap_config(struct dp_soc *soc,
  2091. uint32_t *remap1,
  2092. uint32_t *remap2)
  2093. {
  2094. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
  2095. (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
  2096. *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
  2097. (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
  2098. dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
  2099. return true;
  2100. }
  2101. #else
  2102. static bool dp_reo_remap_config(struct dp_soc *soc,
  2103. uint32_t *remap1,
  2104. uint32_t *remap2)
  2105. {
  2106. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2107. switch (offload_radio) {
  2108. case dp_nss_cfg_default:
  2109. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  2110. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  2111. (0x3 << 18) | (0x4 << 21)) << 8;
  2112. *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  2113. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  2114. (0x3 << 18) | (0x4 << 21)) << 8;
  2115. break;
  2116. case dp_nss_cfg_first_radio:
  2117. *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
  2118. (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
  2119. (0x2 << 18) | (0x3 << 21)) << 8;
  2120. *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
  2121. (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
  2122. (0x4 << 18) | (0x2 << 21)) << 8;
  2123. break;
  2124. case dp_nss_cfg_second_radio:
  2125. *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
  2126. (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
  2127. (0x1 << 18) | (0x3 << 21)) << 8;
  2128. *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
  2129. (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
  2130. (0x4 << 18) | (0x1 << 21)) << 8;
  2131. break;
  2132. case dp_nss_cfg_dbdc:
  2133. case dp_nss_cfg_dbtc:
  2134. /* return false if both or all are offloaded to NSS */
  2135. return false;
  2136. }
  2137. dp_debug("remap1 %x remap2 %x offload_radio %u",
  2138. *remap1, *remap2, offload_radio);
  2139. return true;
  2140. }
  2141. #endif
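/*
 * Illustrative sketch (not part of the original source): each REO remap
 * register packs eight 3-bit destination-ring fields, and the whole pattern
 * is shifted left by 8 to land in the register's remap bit positions. For
 * the dp_nss_cfg_default case above, remap1 therefore rotates the first
 * eight hash buckets through SW rings 1, 2, 3, 4, 1, 2, 3, 4:
 *
 *	ring = (*remap1 >> (8 + 3 * bucket)) & 0x7;   // bucket 0..7
 *
 * The NSS cases simply drop the offloaded radio's ring from that rotation.
 */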
  2142. /*
  2143. * dp_reo_frag_dst_set() - configure reo register to set the
  2144. * fragment destination ring
2145. * @soc: Datapath SOC handle
2146. * @frag_dst_ring: output parameter to set fragment destination ring
2147. *
2148. * Based on offload_radio below, the fragment destination ring is selected:
  2149. * 0 - TCL
  2150. * 1 - SW1
  2151. * 2 - SW2
  2152. * 3 - SW3
  2153. * 4 - SW4
  2154. * 5 - Release
  2155. * 6 - FW
  2156. * 7 - alternate select
  2157. *
2158. * Return: void
  2159. */
  2160. static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
  2161. {
  2162. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2163. switch (offload_radio) {
  2164. case dp_nss_cfg_default:
  2165. *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
  2166. break;
  2167. case dp_nss_cfg_dbdc:
  2168. case dp_nss_cfg_dbtc:
  2169. *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
  2170. break;
  2171. default:
  2172. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2173. FL("dp_reo_frag_dst_set invalid offload radio config"));
  2174. break;
  2175. }
  2176. }
  2177. /*
2178. * dp_soc_cmn_setup() - Common SoC level initialization
  2179. * @soc: Datapath SOC handle
  2180. *
  2181. * This is an internal function used to setup common SOC data structures,
  2182. * to be called from PDEV attach after receiving HW mode capabilities from FW
  2183. */
  2184. static int dp_soc_cmn_setup(struct dp_soc *soc)
  2185. {
  2186. int i;
  2187. struct hal_reo_params reo_params;
  2188. int tx_ring_size;
  2189. int tx_comp_ring_size;
  2190. int reo_dst_ring_size;
  2191. uint32_t entries;
  2192. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2193. if (qdf_atomic_read(&soc->cmn_init_done))
  2194. return 0;
  2195. if (dp_hw_link_desc_pool_setup(soc))
  2196. goto fail1;
  2197. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2198. /* Setup SRNG rings */
  2199. /* Common rings */
  2200. if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
  2201. wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
  2202. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2203. FL("dp_srng_setup failed for wbm_desc_rel_ring"));
  2204. goto fail1;
  2205. }
  2206. soc->num_tcl_data_rings = 0;
  2207. /* Tx data rings */
  2208. if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
  2209. soc->num_tcl_data_rings =
  2210. wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
  2211. tx_comp_ring_size =
  2212. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2213. tx_ring_size =
  2214. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2215. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  2216. if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
  2217. TCL_DATA, i, 0, tx_ring_size)) {
  2218. QDF_TRACE(QDF_MODULE_ID_DP,
  2219. QDF_TRACE_LEVEL_ERROR,
  2220. FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
  2221. goto fail1;
  2222. }
  2223. /*
  2224. * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
  2225. * count
  2226. */
  2227. if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
  2228. WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
  2229. QDF_TRACE(QDF_MODULE_ID_DP,
  2230. QDF_TRACE_LEVEL_ERROR,
  2231. FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
  2232. goto fail1;
  2233. }
  2234. }
  2235. } else {
  2236. /* This will be incremented during per pdev ring setup */
  2237. soc->num_tcl_data_rings = 0;
  2238. }
  2239. if (dp_tx_soc_attach(soc)) {
  2240. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2241. FL("dp_tx_soc_attach failed"));
  2242. goto fail1;
  2243. }
  2244. entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
  2245. /* TCL command and status rings */
  2246. if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
  2247. entries)) {
  2248. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2249. FL("dp_srng_setup failed for tcl_cmd_ring"));
  2250. goto fail1;
  2251. }
  2252. entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
  2253. if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
  2254. entries)) {
  2255. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2256. FL("dp_srng_setup failed for tcl_status_ring"));
  2257. goto fail1;
  2258. }
  2259. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2260. /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
  2261. * descriptors
  2262. */
  2263. /* Rx data rings */
  2264. if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2265. soc->num_reo_dest_rings =
  2266. wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
  2267. QDF_TRACE(QDF_MODULE_ID_DP,
  2268. QDF_TRACE_LEVEL_INFO,
  2269. FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
  2270. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  2271. if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
  2272. i, 0, reo_dst_ring_size)) {
  2273. QDF_TRACE(QDF_MODULE_ID_DP,
  2274. QDF_TRACE_LEVEL_ERROR,
  2275. FL(RNG_ERR "reo_dest_ring [%d]"), i);
  2276. goto fail1;
  2277. }
  2278. }
  2279. } else {
  2280. /* This will be incremented during per pdev ring setup */
  2281. soc->num_reo_dest_rings = 0;
  2282. }
  2283. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2284. /* LMAC RxDMA to SW Rings configuration */
  2285. if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
  2286. /* Only valid for MCL */
  2287. struct dp_pdev *pdev = soc->pdev_list[0];
  2288. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
  2289. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
  2290. RXDMA_DST, 0, i,
  2291. entries)) {
  2292. QDF_TRACE(QDF_MODULE_ID_DP,
  2293. QDF_TRACE_LEVEL_ERROR,
  2294. FL(RNG_ERR "rxdma_err_dst_ring"));
  2295. goto fail1;
  2296. }
  2297. }
  2298. }
  2299. /* TBD: call dp_rx_init to setup Rx SW descriptors */
  2300. /* REO reinjection ring */
  2301. entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
  2302. if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
  2303. entries)) {
  2304. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2305. FL("dp_srng_setup failed for reo_reinject_ring"));
  2306. goto fail1;
  2307. }
  2308. /* Rx release ring */
  2309. if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
  2310. wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
  2311. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2312. FL("dp_srng_setup failed for rx_rel_ring"));
  2313. goto fail1;
  2314. }
  2315. /* Rx exception ring */
  2316. entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
  2317. if (dp_srng_setup(soc, &soc->reo_exception_ring,
  2318. REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
  2319. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2320. FL("dp_srng_setup failed for reo_exception_ring"));
  2321. goto fail1;
  2322. }
  2323. /* REO command and status rings */
  2324. if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
  2325. wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
  2326. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2327. FL("dp_srng_setup failed for reo_cmd_ring"));
  2328. goto fail1;
  2329. }
  2330. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  2331. TAILQ_INIT(&soc->rx.reo_cmd_list);
  2332. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  2333. if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
  2334. wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
  2335. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2336. FL("dp_srng_setup failed for reo_status_ring"));
  2337. goto fail1;
  2338. }
  2339. qdf_spinlock_create(&soc->ast_lock);
  2340. dp_soc_wds_attach(soc);
  2341. /* Reset the cpu ring map if radio is NSS offloaded */
  2342. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
  2343. dp_soc_reset_cpu_ring_map(soc);
  2344. dp_soc_reset_intr_mask(soc);
  2345. }
  2346. /* Setup HW REO */
  2347. qdf_mem_zero(&reo_params, sizeof(reo_params));
  2348. if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
  2349. /*
  2350. * Reo ring remap is not required if both radios
  2351. * are offloaded to NSS
  2352. */
  2353. if (!dp_reo_remap_config(soc,
  2354. &reo_params.remap1,
  2355. &reo_params.remap2))
  2356. goto out;
  2357. reo_params.rx_hash_enabled = true;
  2358. }
  2359. /* setup the global rx defrag waitlist */
  2360. TAILQ_INIT(&soc->rx.defrag.waitlist);
  2361. soc->rx.defrag.timeout_ms =
  2362. wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
  2363. soc->rx.flags.defrag_timeout_check =
  2364. wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
  2365. qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
  2366. out:
  2367. /*
  2368. * set the fragment destination ring
  2369. */
  2370. dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
  2371. hal_reo_setup(soc->hal_soc, &reo_params);
  2372. qdf_atomic_set(&soc->cmn_init_done, 1);
  2373. qdf_nbuf_queue_init(&soc->htt_stats.msg);
  2374. return 0;
  2375. fail1:
  2376. /*
  2377. * Cleanup will be done as part of soc_detach, which will
  2378. * be called on pdev attach failure
  2379. */
  2380. return QDF_STATUS_E_FAILURE;
  2381. }
  2382. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
  2383. static void dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2384. {
  2385. struct cdp_lro_hash_config lro_hash;
  2386. if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  2387. !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  2388. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2389. FL("LRO disabled RX hash disabled"));
  2390. return;
  2391. }
  2392. qdf_mem_zero(&lro_hash, sizeof(lro_hash));
  2393. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
  2394. lro_hash.lro_enable = 1;
  2395. lro_hash.tcp_flag = QDF_TCPHDR_ACK;
  2396. lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
  2397. QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
  2398. QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
  2399. }
  2400. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
  2401. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
  2402. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2403. LRO_IPV4_SEED_ARR_SZ));
  2404. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
  2405. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2406. LRO_IPV6_SEED_ARR_SZ));
  2407. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  2408. "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
  2409. lro_hash.lro_enable, lro_hash.tcp_flag,
  2410. lro_hash.tcp_flag_mask);
  2411. qdf_trace_hex_dump(QDF_MODULE_ID_DP,
  2412. QDF_TRACE_LEVEL_ERROR,
  2413. (void *)lro_hash.toeplitz_hash_ipv4,
  2414. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2415. LRO_IPV4_SEED_ARR_SZ));
  2416. qdf_trace_hex_dump(QDF_MODULE_ID_DP,
  2417. QDF_TRACE_LEVEL_ERROR,
  2418. (void *)lro_hash.toeplitz_hash_ipv6,
  2419. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2420. LRO_IPV6_SEED_ARR_SZ));
  2421. qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
  2422. if (soc->cdp_soc.ol_ops->lro_hash_config)
  2423. (void)soc->cdp_soc.ol_ops->lro_hash_config
  2424. (pdev->ctrl_pdev, &lro_hash);
  2425. }
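/*
 * Illustrative sketch (not part of the original source): the Toeplitz seeds
 * filled in above are random values generated at setup time, so RX hash
 * based REO ring selection (and LRO flow hashing when enabled) is not
 * predictable across attaches. The control path receives the whole struct
 * through the lro_hash_config callback registered in cdp_soc.ol_ops:
 *
 *	soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev, &lro_hash);
 *
 * exactly as invoked at the end of dp_lro_hash_setup().
 */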
  2426. /*
  2427. * dp_rxdma_ring_setup() - configure the RX DMA rings
  2428. * @soc: data path SoC handle
  2429. * @pdev: Physical device handle
  2430. *
2431. * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
  2432. */
  2433. #ifdef QCA_HOST2FW_RXBUF_RING
  2434. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2435. struct dp_pdev *pdev)
  2436. {
  2437. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2438. int max_mac_rings;
  2439. int i;
  2440. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2441. max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
  2442. for (i = 0; i < max_mac_rings; i++) {
  2443. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2444. "%s: pdev_id %d mac_id %d",
  2445. __func__, pdev->pdev_id, i);
  2446. if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
  2447. RXDMA_BUF, 1, i,
  2448. wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
  2449. QDF_TRACE(QDF_MODULE_ID_DP,
  2450. QDF_TRACE_LEVEL_ERROR,
  2451. FL("failed rx mac ring setup"));
  2452. return QDF_STATUS_E_FAILURE;
  2453. }
  2454. }
  2455. return QDF_STATUS_SUCCESS;
  2456. }
  2457. #else
  2458. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2459. struct dp_pdev *pdev)
  2460. {
  2461. return QDF_STATUS_SUCCESS;
  2462. }
  2463. #endif
  2464. /**
  2465. * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2466. * @pdev: DP_PDEV handle
  2467. *
  2468. * Return: void
  2469. */
  2470. static inline void
  2471. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  2472. {
  2473. uint8_t map_id;
  2474. struct dp_soc *soc = pdev->soc;
  2475. if (!soc)
  2476. return;
  2477. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  2478. qdf_mem_copy(pdev->dscp_tid_map[map_id],
  2479. default_dscp_tid_map,
  2480. sizeof(default_dscp_tid_map));
  2481. }
  2482. for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
  2483. hal_tx_set_dscp_tid_map(soc->hal_soc,
  2484. default_dscp_tid_map,
  2485. map_id);
  2486. }
  2487. }
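/*
 * Illustrative sketch (not part of the original source): after this setup
 * every pdev map is a copy of default_dscp_tid_map, and the same table is
 * programmed into the hardware map registers. A hypothetical lookup of the
 * TID for a given DSCP codepoint on map 0 would be:
 *
 *	uint8_t tid = pdev->dscp_tid_map[0][dscp];   // dscp in 0..63
 *
 * Per-vdev overrides only change which map_id (vdev->dscp_tid_map_id) is
 * consulted, not the layout of the tables themselves.
 */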
  2488. #ifdef IPA_OFFLOAD
  2489. /**
  2490. * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
  2491. * @soc: data path instance
  2492. * @pdev: core txrx pdev context
  2493. *
2494. * Return: QDF_STATUS_SUCCESS on success,
2495. * QDF_STATUS_E_FAILURE on failure
  2496. */
  2497. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2498. struct dp_pdev *pdev)
  2499. {
  2500. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2501. int entries;
  2502. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2503. entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
  2504. /* Setup second Rx refill buffer ring */
  2505. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2506. IPA_RX_REFILL_BUF_RING_IDX,
  2507. pdev->pdev_id,
  2508. entries)) {
  2509. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2510. FL("dp_srng_setup failed second rx refill ring"));
  2511. return QDF_STATUS_E_FAILURE;
  2512. }
  2513. return QDF_STATUS_SUCCESS;
  2514. }
  2515. /**
  2516. * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
  2517. * @soc: data path instance
  2518. * @pdev: core txrx pdev context
  2519. *
  2520. * Return: void
  2521. */
  2522. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2523. struct dp_pdev *pdev)
  2524. {
  2525. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2526. IPA_RX_REFILL_BUF_RING_IDX);
  2527. }
  2528. #else
  2529. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2530. struct dp_pdev *pdev)
  2531. {
  2532. return QDF_STATUS_SUCCESS;
  2533. }
  2534. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2535. struct dp_pdev *pdev)
  2536. {
  2537. }
  2538. #endif
  2539. #if !defined(DISABLE_MON_CONFIG)
  2540. /**
  2541. * dp_mon_rings_setup() - Initialize Monitor rings based on target
  2542. * @soc: soc handle
  2543. * @pdev: physical device handle
  2544. *
  2545. * Return: nonzero on failure and zero on success
  2546. */
  2547. static
  2548. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2549. {
  2550. int mac_id = 0;
  2551. int pdev_id = pdev->pdev_id;
  2552. int entries;
  2553. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2554. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2555. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  2556. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  2557. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  2558. entries =
  2559. wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
  2560. if (dp_srng_setup(soc,
  2561. &pdev->rxdma_mon_buf_ring[mac_id],
  2562. RXDMA_MONITOR_BUF, 0, mac_for_pdev,
  2563. entries)) {
  2564. QDF_TRACE(QDF_MODULE_ID_DP,
  2565. QDF_TRACE_LEVEL_ERROR,
  2566. FL(RNG_ERR "rxdma_mon_buf_ring "));
  2567. return QDF_STATUS_E_NOMEM;
  2568. }
  2569. entries =
  2570. wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
  2571. if (dp_srng_setup(soc,
  2572. &pdev->rxdma_mon_dst_ring[mac_id],
  2573. RXDMA_MONITOR_DST, 0, mac_for_pdev,
  2574. entries)) {
  2575. QDF_TRACE(QDF_MODULE_ID_DP,
  2576. QDF_TRACE_LEVEL_ERROR,
  2577. FL(RNG_ERR "rxdma_mon_dst_ring"));
  2578. return QDF_STATUS_E_NOMEM;
  2579. }
  2580. entries =
  2581. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2582. if (dp_srng_setup(soc,
  2583. &pdev->rxdma_mon_status_ring[mac_id],
  2584. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2585. entries)) {
  2586. QDF_TRACE(QDF_MODULE_ID_DP,
  2587. QDF_TRACE_LEVEL_ERROR,
  2588. FL(RNG_ERR "rxdma_mon_status_ring"));
  2589. return QDF_STATUS_E_NOMEM;
  2590. }
  2591. entries =
  2592. wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
  2593. if (dp_srng_setup(soc,
  2594. &pdev->rxdma_mon_desc_ring[mac_id],
  2595. RXDMA_MONITOR_DESC, 0, mac_for_pdev,
  2596. entries)) {
  2597. QDF_TRACE(QDF_MODULE_ID_DP,
  2598. QDF_TRACE_LEVEL_ERROR,
  2599. FL(RNG_ERR "rxdma_mon_desc_ring"));
  2600. return QDF_STATUS_E_NOMEM;
  2601. }
  2602. } else {
  2603. entries =
  2604. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2605. if (dp_srng_setup(soc,
  2606. &pdev->rxdma_mon_status_ring[mac_id],
  2607. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2608. entries)) {
  2609. QDF_TRACE(QDF_MODULE_ID_DP,
  2610. QDF_TRACE_LEVEL_ERROR,
  2611. FL(RNG_ERR "rxdma_mon_status_ring"));
  2612. return QDF_STATUS_E_NOMEM;
  2613. }
  2614. }
  2615. }
  2616. return QDF_STATUS_SUCCESS;
  2617. }
  2618. #else
  2619. static
  2620. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2621. {
  2622. return QDF_STATUS_SUCCESS;
  2623. }
  2624. #endif
2625. /* dp_iterate_update_peer_list() - update peer stats on cal client timer
  2626. * @pdev_hdl: pdev handle
  2627. */
  2628. #ifdef ATH_SUPPORT_EXT_STAT
  2629. void dp_iterate_update_peer_list(void *pdev_hdl)
  2630. {
  2631. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  2632. struct dp_vdev *vdev = NULL;
  2633. struct dp_peer *peer = NULL;
  2634. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  2635. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  2636. dp_cal_client_update_peer_stats(&peer->stats);
  2637. }
  2638. }
  2639. }
  2640. #else
  2641. void dp_iterate_update_peer_list(void *pdev_hdl)
  2642. {
  2643. }
  2644. #endif
  2645. /*
  2646. * dp_pdev_attach_wifi3() - attach txrx pdev
2647. * @txrx_soc: Datapath SOC handle
2648. * @ctrl_pdev: Opaque PDEV object
  2649. * @htc_handle: HTC handle for host-target interface
  2650. * @qdf_osdev: QDF OS device
  2651. * @pdev_id: PDEV ID
  2652. *
  2653. * Return: DP PDEV handle on success, NULL on failure
  2654. */
  2655. static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
  2656. struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
  2657. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
  2658. {
  2659. int tx_ring_size;
  2660. int tx_comp_ring_size;
  2661. int reo_dst_ring_size;
  2662. int entries;
  2663. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2664. int nss_cfg;
  2665. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  2666. struct dp_pdev *pdev = NULL;
  2667. if (soc->dp_soc_reinit)
  2668. pdev = soc->pdev_list[pdev_id];
  2669. else
  2670. pdev = qdf_mem_malloc(sizeof(*pdev));
  2671. if (!pdev) {
  2672. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2673. FL("DP PDEV memory allocation failed"));
  2674. goto fail0;
  2675. }
  2676. /*
  2677. * Variable to prevent double pdev deinitialization during
2678. * radio detach execution, i.e. in the absence of any vdev.
  2679. */
  2680. pdev->pdev_deinit = 0;
  2681. pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
  2682. if (!pdev->invalid_peer) {
  2683. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2684. FL("Invalid peer memory allocation failed"));
  2685. qdf_mem_free(pdev);
  2686. goto fail0;
  2687. }
  2688. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2689. pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
  2690. if (!pdev->wlan_cfg_ctx) {
  2691. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2692. FL("pdev cfg_attach failed"));
  2693. qdf_mem_free(pdev->invalid_peer);
  2694. qdf_mem_free(pdev);
  2695. goto fail0;
  2696. }
  2697. /*
  2698. * set nss pdev config based on soc config
  2699. */
  2700. nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
  2701. wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
  2702. (nss_cfg & (1 << pdev_id)));
  2703. pdev->soc = soc;
  2704. pdev->ctrl_pdev = ctrl_pdev;
  2705. pdev->pdev_id = pdev_id;
  2706. soc->pdev_list[pdev_id] = pdev;
  2707. pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
  2708. soc->pdev_count++;
  2709. TAILQ_INIT(&pdev->vdev_list);
  2710. qdf_spinlock_create(&pdev->vdev_list_lock);
  2711. pdev->vdev_count = 0;
  2712. qdf_spinlock_create(&pdev->tx_mutex);
  2713. qdf_spinlock_create(&pdev->neighbour_peer_mutex);
  2714. TAILQ_INIT(&pdev->neighbour_peers_list);
  2715. pdev->neighbour_peers_added = false;
  2716. if (dp_soc_cmn_setup(soc)) {
  2717. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2718. FL("dp_soc_cmn_setup failed"));
  2719. goto fail1;
  2720. }
  2721. /* Setup per PDEV TCL rings if configured */
  2722. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  2723. tx_ring_size =
  2724. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2725. tx_comp_ring_size =
  2726. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2727. if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
  2728. pdev_id, pdev_id, tx_ring_size)) {
  2729. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2730. FL("dp_srng_setup failed for tcl_data_ring"));
  2731. goto fail1;
  2732. }
  2733. if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
  2734. WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
  2735. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2736. FL("dp_srng_setup failed for tx_comp_ring"));
  2737. goto fail1;
  2738. }
  2739. soc->num_tcl_data_rings++;
  2740. }
  2741. /* Tx specific init */
  2742. if (dp_tx_pdev_attach(pdev)) {
  2743. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2744. FL("dp_tx_pdev_attach failed"));
  2745. goto fail1;
  2746. }
  2747. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2748. /* Setup per PDEV REO rings if configured */
  2749. if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2750. if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
  2751. pdev_id, pdev_id, reo_dst_ring_size)) {
  2752. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2753. FL("dp_srng_setup failed for reo_dest_ringn"));
  2754. goto fail1;
  2755. }
  2756. soc->num_reo_dest_rings++;
  2757. }
  2758. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
  2759. wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
  2760. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2761. FL("dp_srng_setup failed rx refill ring"));
  2762. goto fail1;
  2763. }
  2764. if (dp_rxdma_ring_setup(soc, pdev)) {
  2765. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2766. FL("RXDMA ring config failed"));
  2767. goto fail1;
  2768. }
  2769. if (dp_mon_rings_setup(soc, pdev)) {
  2770. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2771. FL("MONITOR rings setup failed"));
  2772. goto fail1;
  2773. }
  2774. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2775. if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
  2776. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
  2777. 0, pdev_id,
  2778. entries)) {
  2779. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2780. FL(RNG_ERR "rxdma_err_dst_ring"));
  2781. goto fail1;
  2782. }
  2783. }
  2784. if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
  2785. goto fail1;
  2786. if (dp_ipa_ring_resource_setup(soc, pdev))
  2787. goto fail1;
  2788. if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
  2789. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2790. FL("dp_ipa_uc_attach failed"));
  2791. goto fail1;
  2792. }
  2793. /* Rx specific init */
  2794. if (dp_rx_pdev_attach(pdev)) {
  2795. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2796. FL("dp_rx_pdev_attach failed"));
  2797. goto fail1;
  2798. }
  2799. DP_STATS_INIT(pdev);
  2800. /* Monitor filter init */
  2801. pdev->mon_filter_mode = MON_FILTER_ALL;
  2802. pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
  2803. pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
  2804. pdev->fp_data_filter = FILTER_DATA_ALL;
  2805. pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
  2806. pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
  2807. pdev->mo_data_filter = FILTER_DATA_ALL;
  2808. dp_local_peer_id_pool_init(pdev);
  2809. dp_dscp_tid_map_setup(pdev);
  2810. /* Rx monitor mode specific init */
  2811. if (dp_rx_pdev_mon_attach(pdev)) {
  2812. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2813. "dp_rx_pdev_mon_attach failed");
  2814. goto fail1;
  2815. }
  2816. if (dp_wdi_event_attach(pdev)) {
  2817. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2818. "dp_wdi_evet_attach failed");
  2819. goto fail1;
  2820. }
  2821. /* set the reo destination during initialization */
  2822. pdev->reo_dest = pdev->pdev_id + 1;
  2823. /*
  2824. * initialize ppdu tlv list
  2825. */
  2826. TAILQ_INIT(&pdev->ppdu_info_list);
  2827. pdev->tlv_count = 0;
  2828. pdev->list_depth = 0;
  2829. qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
  2830. pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
  2831. sizeof(struct cdp_tx_sojourn_stats), 0, 4,
  2832. TRUE);
2833. /* initialize cal client timer */
  2834. dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
  2835. &dp_iterate_update_peer_list);
  2836. return (struct cdp_pdev *)pdev;
  2837. fail1:
  2838. dp_pdev_detach((struct cdp_pdev *)pdev, 0);
  2839. fail0:
  2840. return NULL;
  2841. }
  2842. /*
2843. * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
  2844. * @soc: data path SoC handle
  2845. * @pdev: Physical device handle
  2846. *
  2847. * Return: void
  2848. */
  2849. #ifdef QCA_HOST2FW_RXBUF_RING
  2850. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2851. struct dp_pdev *pdev)
  2852. {
  2853. int max_mac_rings =
  2854. wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
  2855. int i;
  2856. max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
  2857. max_mac_rings : MAX_RX_MAC_RINGS;
  2858. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  2859. dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
  2860. RXDMA_BUF, 1);
  2861. qdf_timer_free(&soc->mon_reap_timer);
  2862. }
  2863. #else
  2864. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2865. struct dp_pdev *pdev)
  2866. {
  2867. }
  2868. #endif
  2869. /*
2870. * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
  2871. * @pdev: device object
  2872. *
  2873. * Return: void
  2874. */
  2875. static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  2876. {
  2877. struct dp_neighbour_peer *peer = NULL;
  2878. struct dp_neighbour_peer *temp_peer = NULL;
  2879. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  2880. neighbour_peer_list_elem, temp_peer) {
  2881. /* delete this peer from the list */
  2882. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  2883. peer, neighbour_peer_list_elem);
  2884. qdf_mem_free(peer);
  2885. }
  2886. qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
  2887. }
  2888. /**
  2889. * dp_htt_ppdu_stats_detach() - detach stats resources
  2890. * @pdev: Datapath PDEV handle
  2891. *
  2892. * Return: void
  2893. */
  2894. static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
  2895. {
  2896. struct ppdu_info *ppdu_info, *ppdu_info_next;
  2897. TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
  2898. ppdu_info_list_elem, ppdu_info_next) {
  2899. if (!ppdu_info)
  2900. break;
  2901. qdf_assert_always(ppdu_info->nbuf);
  2902. qdf_nbuf_free(ppdu_info->nbuf);
  2903. qdf_mem_free(ppdu_info);
  2904. }
  2905. }
  2906. #if !defined(DISABLE_MON_CONFIG)
  2907. static
  2908. void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  2909. int mac_id)
  2910. {
  2911. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  2912. dp_srng_cleanup(soc,
  2913. &pdev->rxdma_mon_buf_ring[mac_id],
  2914. RXDMA_MONITOR_BUF, 0);
  2915. dp_srng_cleanup(soc,
  2916. &pdev->rxdma_mon_dst_ring[mac_id],
  2917. RXDMA_MONITOR_DST, 0);
  2918. dp_srng_cleanup(soc,
  2919. &pdev->rxdma_mon_status_ring[mac_id],
  2920. RXDMA_MONITOR_STATUS, 0);
  2921. dp_srng_cleanup(soc,
  2922. &pdev->rxdma_mon_desc_ring[mac_id],
  2923. RXDMA_MONITOR_DESC, 0);
  2924. dp_srng_cleanup(soc,
  2925. &pdev->rxdma_err_dst_ring[mac_id],
  2926. RXDMA_DST, 0);
  2927. } else {
  2928. dp_srng_cleanup(soc,
  2929. &pdev->rxdma_mon_status_ring[mac_id],
  2930. RXDMA_MONITOR_STATUS, 0);
  2931. dp_srng_cleanup(soc,
  2932. &pdev->rxdma_err_dst_ring[mac_id],
  2933. RXDMA_DST, 0);
  2934. }
  2935. }
  2936. #else
  2937. static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  2938. int mac_id)
  2939. {
  2940. }
  2941. #endif
  2942. /**
  2943. * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
  2944. *
  2945. * @soc: soc handle
  2946. * @pdev: datapath physical dev handle
  2947. * @mac_id: mac number
  2948. *
  2949. * Return: None
  2950. */
  2951. static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
  2952. int mac_id)
  2953. {
  2954. }
  2955. /**
  2956. * dp_pdev_mem_reset() - Reset txrx pdev memory
  2957. * @pdev: dp pdev handle
  2958. *
  2959. * Return: None
  2960. */
  2961. static void dp_pdev_mem_reset(struct dp_pdev *pdev)
  2962. {
  2963. uint16_t len = 0;
  2964. uint8_t *dp_pdev_offset = (uint8_t *)pdev;
  2965. len = sizeof(struct dp_pdev) -
  2966. offsetof(struct dp_pdev, pdev_deinit) -
  2967. sizeof(pdev->pdev_deinit);
  2968. dp_pdev_offset = dp_pdev_offset +
  2969. offsetof(struct dp_pdev, pdev_deinit) +
  2970. sizeof(pdev->pdev_deinit);
  2971. qdf_mem_zero(dp_pdev_offset, len);
  2972. }
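/*
 * Illustrative sketch (not part of the original source): the reset above
 * deliberately skips everything up to and including pdev->pdev_deinit so
 * that state needed across a re-attach survives, and only the remainder of
 * the structure is zeroed:
 *
 *	[0, offsetof(struct dp_pdev, pdev_deinit) + sizeof(pdev->pdev_deinit))
 *	    preserved
 *	[that offset, sizeof(struct dp_pdev))
 *	    zeroed by qdf_mem_zero()
 *
 * dp_soc_mem_reset() later in this file applies the same pattern to
 * struct dp_soc around the dp_soc_reinit flag.
 */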
  2973. /**
  2974. * dp_pdev_deinit() - Deinit txrx pdev
  2975. * @txrx_pdev: Datapath PDEV handle
  2976. * @force: Force deinit
  2977. *
  2978. * Return: None
  2979. */
  2980. static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
  2981. {
  2982. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  2983. struct dp_soc *soc = pdev->soc;
  2984. qdf_nbuf_t curr_nbuf, next_nbuf;
  2985. int mac_id;
  2986. /*
  2987. * Prevent double pdev deinitialization during radio detach
2988. * execution, i.e. in the absence of any vdev
  2989. */
  2990. if (pdev->pdev_deinit)
  2991. return;
  2992. pdev->pdev_deinit = 1;
  2993. dp_wdi_event_detach(pdev);
  2994. dp_tx_pdev_detach(pdev);
  2995. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  2996. dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
  2997. TCL_DATA, pdev->pdev_id);
  2998. dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
  2999. WBM2SW_RELEASE, pdev->pdev_id);
  3000. }
  3001. dp_pktlogmod_exit(pdev);
  3002. dp_rx_pdev_detach(pdev);
  3003. dp_rx_pdev_mon_detach(pdev);
  3004. dp_neighbour_peers_detach(pdev);
  3005. qdf_spinlock_destroy(&pdev->tx_mutex);
  3006. qdf_spinlock_destroy(&pdev->vdev_list_lock);
  3007. dp_ipa_uc_detach(soc, pdev);
  3008. dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
  3009. /* Cleanup per PDEV REO rings if configured */
  3010. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3011. dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3012. REO_DST, pdev->pdev_id);
  3013. }
  3014. dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3015. dp_rxdma_ring_cleanup(soc, pdev);
  3016. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3017. dp_mon_ring_deinit(soc, pdev, mac_id);
  3018. dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3019. RXDMA_DST, 0);
  3020. }
  3021. curr_nbuf = pdev->invalid_peer_head_msdu;
  3022. while (curr_nbuf) {
  3023. next_nbuf = qdf_nbuf_next(curr_nbuf);
  3024. qdf_nbuf_free(curr_nbuf);
  3025. curr_nbuf = next_nbuf;
  3026. }
  3027. pdev->invalid_peer_head_msdu = NULL;
  3028. pdev->invalid_peer_tail_msdu = NULL;
  3029. dp_htt_ppdu_stats_detach(pdev);
  3030. qdf_nbuf_free(pdev->sojourn_buf);
  3031. dp_cal_client_detach(&pdev->cal_client_ctx);
  3032. soc->pdev_count--;
  3033. wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
  3034. qdf_mem_free(pdev->invalid_peer);
  3035. qdf_mem_free(pdev->dp_txrx_handle);
  3036. dp_pdev_mem_reset(pdev);
  3037. }
  3038. /**
  3039. * dp_pdev_deinit_wifi3() - Deinit txrx pdev
  3040. * @txrx_pdev: Datapath PDEV handle
  3041. * @force: Force deinit
  3042. *
  3043. * Return: None
  3044. */
  3045. static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
  3046. {
  3047. dp_pdev_deinit(txrx_pdev, force);
  3048. }
  3049. /*
  3050. * dp_pdev_detach() - Complete rest of pdev detach
  3051. * @txrx_pdev: Datapath PDEV handle
  3052. * @force: Force deinit
  3053. *
  3054. * Return: None
  3055. */
  3056. static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
  3057. {
  3058. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3059. struct dp_soc *soc = pdev->soc;
  3060. int mac_id;
  3061. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3062. dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3063. TCL_DATA, pdev->pdev_id);
  3064. dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3065. WBM2SW_RELEASE, pdev->pdev_id);
  3066. }
  3067. dp_mon_link_free(pdev);
  3068. /* Cleanup per PDEV REO rings if configured */
  3069. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3070. dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3071. REO_DST, pdev->pdev_id);
  3072. }
  3073. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3074. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3075. dp_mon_ring_cleanup(soc, pdev, mac_id);
  3076. dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3077. RXDMA_DST, 0);
  3078. }
  3079. soc->pdev_list[pdev->pdev_id] = NULL;
  3080. qdf_mem_free(pdev);
  3081. }
  3082. /*
  3083. * dp_pdev_detach_wifi3() - detach txrx pdev
  3084. * @txrx_pdev: Datapath PDEV handle
  3085. * @force: Force detach
  3086. *
  3087. * Return: None
  3088. */
  3089. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
  3090. {
  3091. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3092. struct dp_soc *soc = pdev->soc;
  3093. if (soc->dp_soc_reinit) {
  3094. dp_pdev_detach(txrx_pdev, force);
  3095. } else {
  3096. dp_pdev_deinit(txrx_pdev, force);
  3097. dp_pdev_detach(txrx_pdev, force);
  3098. }
  3099. }
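/*
 * Illustrative sketch (not part of the original source): pdev teardown is
 * split into two phases. dp_pdev_deinit() releases runtime state and SRNG
 * initialization but keeps the allocations, while dp_pdev_detach() frees
 * the ring memory and the pdev itself. On a normal detach both run in
 * order; when soc->dp_soc_reinit is set, deinit has already happened via
 * dp_soc_deinit(), so only the final detach (free) phase runs here.
 */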
  3100. /*
  3101. * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  3102. * @soc: DP SOC handle
  3103. */
  3104. static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  3105. {
  3106. struct reo_desc_list_node *desc;
  3107. struct dp_rx_tid *rx_tid;
  3108. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  3109. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  3110. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  3111. rx_tid = &desc->rx_tid;
  3112. qdf_mem_unmap_nbytes_single(soc->osdev,
  3113. rx_tid->hw_qdesc_paddr,
  3114. QDF_DMA_BIDIRECTIONAL,
  3115. rx_tid->hw_qdesc_alloc_size);
  3116. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  3117. qdf_mem_free(desc);
  3118. }
  3119. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  3120. qdf_list_destroy(&soc->reo_desc_freelist);
  3121. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  3122. }
  3123. /**
  3124. * dp_soc_mem_reset() - Reset Dp Soc memory
  3125. * @soc: DP handle
  3126. *
  3127. * Return: None
  3128. */
  3129. static void dp_soc_mem_reset(struct dp_soc *soc)
  3130. {
  3131. uint16_t len = 0;
  3132. uint8_t *dp_soc_offset = (uint8_t *)soc;
  3133. len = sizeof(struct dp_soc) -
  3134. offsetof(struct dp_soc, dp_soc_reinit) -
  3135. sizeof(soc->dp_soc_reinit);
  3136. dp_soc_offset = dp_soc_offset +
  3137. offsetof(struct dp_soc, dp_soc_reinit) +
  3138. sizeof(soc->dp_soc_reinit);
  3139. qdf_mem_zero(dp_soc_offset, len);
  3140. }
  3141. /**
  3142. * dp_soc_deinit() - Deinitialize txrx SOC
  3143. * @txrx_soc: Opaque DP SOC handle
  3144. *
  3145. * Return: None
  3146. */
  3147. static void dp_soc_deinit(void *txrx_soc)
  3148. {
  3149. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3150. int i;
  3151. qdf_atomic_set(&soc->cmn_init_done, 0);
  3152. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3153. if (soc->pdev_list[i])
  3154. dp_pdev_deinit((struct cdp_pdev *)
  3155. soc->pdev_list[i], 1);
  3156. }
  3157. qdf_flush_work(&soc->htt_stats.work);
  3158. qdf_disable_work(&soc->htt_stats.work);
  3159. /* Free pending htt stats messages */
  3160. qdf_nbuf_queue_free(&soc->htt_stats.msg);
  3161. dp_reo_cmdlist_destroy(soc);
  3162. dp_peer_find_detach(soc);
  3163. /* Free the ring memories */
  3164. /* Common rings */
  3165. dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3166. /* Tx data rings */
  3167. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3168. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3169. dp_srng_deinit(soc, &soc->tcl_data_ring[i],
  3170. TCL_DATA, i);
  3171. dp_srng_deinit(soc, &soc->tx_comp_ring[i],
  3172. WBM2SW_RELEASE, i);
  3173. }
  3174. }
  3175. /* TCL command and status rings */
  3176. dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3177. dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3178. /* Rx data rings */
  3179. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3180. soc->num_reo_dest_rings =
  3181. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3182. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3183. /* TODO: Get number of rings and ring sizes
  3184. * from wlan_cfg
  3185. */
  3186. dp_srng_deinit(soc, &soc->reo_dest_ring[i],
  3187. REO_DST, i);
  3188. }
  3189. }
  3190. /* REO reinjection ring */
  3191. dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3192. /* Rx release ring */
  3193. dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3194. /* Rx exception ring */
  3195. /* TODO: Better to store ring_type and ring_num in
  3196. * dp_srng during setup
  3197. */
  3198. dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3199. /* REO command and status rings */
  3200. dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3201. dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3202. qdf_spinlock_destroy(&soc->peer_ref_mutex);
  3203. qdf_spinlock_destroy(&soc->htt_stats.lock);
  3204. htt_soc_htc_dealloc(soc->htt_handle);
  3205. qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
  3206. dp_reo_cmdlist_destroy(soc);
  3207. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  3208. dp_reo_desc_freelist_destroy(soc);
  3209. dp_soc_wds_detach(soc);
  3210. qdf_spinlock_destroy(&soc->ast_lock);
  3211. dp_soc_mem_reset(soc);
  3212. }
  3213. /**
  3214. * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
  3215. * @txrx_soc: Opaque DP SOC handle
  3216. *
  3217. * Return: None
  3218. */
  3219. static void dp_soc_deinit_wifi3(void *txrx_soc)
  3220. {
  3221. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3222. soc->dp_soc_reinit = 1;
  3223. dp_soc_deinit(txrx_soc);
  3224. }
  3225. /*
  3226. * dp_soc_detach() - Detach rest of txrx SOC
  3227. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3228. *
  3229. * Return: None
  3230. */
  3231. static void dp_soc_detach(void *txrx_soc)
  3232. {
  3233. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3234. int i;
  3235. qdf_atomic_set(&soc->cmn_init_done, 0);
  3236. /* TBD: Call Tx and Rx cleanup functions to free buffers and
  3237. * SW descriptors
  3238. */
  3239. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3240. if (soc->pdev_list[i])
  3241. dp_pdev_detach((struct cdp_pdev *)
  3242. soc->pdev_list[i], 1);
  3243. }
  3244. /* Free the ring memories */
  3245. /* Common rings */
  3246. dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3247. dp_tx_soc_detach(soc);
  3248. /* Tx data rings */
  3249. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3250. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3251. dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
  3252. TCL_DATA, i);
  3253. dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
  3254. WBM2SW_RELEASE, i);
  3255. }
  3256. }
  3257. /* TCL command and status rings */
  3258. dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3259. dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3260. /* Rx data rings */
  3261. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3262. soc->num_reo_dest_rings =
  3263. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3264. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3265. /* TODO: Get number of rings and ring sizes
  3266. * from wlan_cfg
  3267. */
  3268. dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
  3269. REO_DST, i);
  3270. }
  3271. }
  3272. /* REO reinjection ring */
  3273. dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3274. /* Rx release ring */
  3275. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3276. /* Rx exception ring */
  3277. /* TODO: Better to store ring_type and ring_num in
  3278. * dp_srng during setup
  3279. */
  3280. dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3281. /* REO command and status rings */
  3282. dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3283. dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3284. dp_hw_link_desc_pool_cleanup(soc);
  3285. htt_soc_detach(soc->htt_handle);
  3286. soc->dp_soc_reinit = 0;
  3287. wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
  3288. qdf_mem_free(soc);
  3289. }
  3290. /*
  3291. * dp_soc_detach_wifi3() - Detach txrx SOC
  3292. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3293. *
  3294. * Return: None
  3295. */
  3296. static void dp_soc_detach_wifi3(void *txrx_soc)
  3297. {
  3298. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3299. if (soc->dp_soc_reinit) {
  3300. dp_soc_detach(txrx_soc);
  3301. } else {
  3302. dp_soc_deinit(txrx_soc);
  3303. dp_soc_detach(txrx_soc);
  3304. }
  3305. }
  3306. #if !defined(DISABLE_MON_CONFIG)
  3307. /**
  3308. * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
  3309. * @soc: soc handle
  3310. * @pdev: physical device handle
3311. * @mac_id: mac id
3312. * @mac_for_pdev: mac id mapped for this pdev
  3313. *
  3314. * Return: non-zero for failure, zero for success
  3315. */
  3316. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3317. struct dp_pdev *pdev,
  3318. int mac_id,
  3319. int mac_for_pdev)
  3320. {
  3321. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3322. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  3323. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3324. pdev->rxdma_mon_buf_ring[mac_id]
  3325. .hal_srng,
  3326. RXDMA_MONITOR_BUF);
  3327. if (status != QDF_STATUS_SUCCESS) {
  3328. dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
  3329. return status;
  3330. }
  3331. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3332. pdev->rxdma_mon_dst_ring[mac_id]
  3333. .hal_srng,
  3334. RXDMA_MONITOR_DST);
  3335. if (status != QDF_STATUS_SUCCESS) {
  3336. dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
  3337. return status;
  3338. }
  3339. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3340. pdev->rxdma_mon_status_ring[mac_id]
  3341. .hal_srng,
  3342. RXDMA_MONITOR_STATUS);
  3343. if (status != QDF_STATUS_SUCCESS) {
  3344. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3345. return status;
  3346. }
  3347. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3348. pdev->rxdma_mon_desc_ring[mac_id]
  3349. .hal_srng,
  3350. RXDMA_MONITOR_DESC);
  3351. if (status != QDF_STATUS_SUCCESS) {
  3352. dp_err("Failed to send htt srng message for Rxdma mon desc ring");
  3353. return status;
  3354. }
  3355. } else {
  3356. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3357. pdev->rxdma_mon_status_ring[mac_id]
  3358. .hal_srng,
  3359. RXDMA_MONITOR_STATUS);
  3360. if (status != QDF_STATUS_SUCCESS) {
  3361. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3362. return status;
  3363. }
  3364. }
  3365. return status;
  3366. }
  3367. #else
  3368. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3369. struct dp_pdev *pdev,
  3370. int mac_id,
  3371. int mac_for_pdev)
  3372. {
  3373. return QDF_STATUS_SUCCESS;
  3374. }
  3375. #endif
  3376. /*
  3377. * dp_rxdma_ring_config() - configure the RX DMA rings
  3378. *
  3379. * This function is used to configure the MAC rings.
3380. * On MCL, the host provides buffers in the Host2FW ring;
3381. * FW refills (copies) buffers to the ring and updates
3382. * ring_idx in the register
  3383. *
  3384. * @soc: data path SoC handle
  3385. *
  3386. * Return: zero on success, non-zero on failure
  3387. */
  3388. #ifdef QCA_HOST2FW_RXBUF_RING
  3389. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3390. {
  3391. int i;
  3392. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3393. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3394. struct dp_pdev *pdev = soc->pdev_list[i];
  3395. if (pdev) {
  3396. int mac_id;
  3397. bool dbs_enable = 0;
  3398. int max_mac_rings =
  3399. wlan_cfg_get_num_mac_rings
  3400. (pdev->wlan_cfg_ctx);
  3401. htt_srng_setup(soc->htt_handle, 0,
  3402. pdev->rx_refill_buf_ring.hal_srng,
  3403. RXDMA_BUF);
  3404. if (pdev->rx_refill_buf_ring2.hal_srng)
  3405. htt_srng_setup(soc->htt_handle, 0,
  3406. pdev->rx_refill_buf_ring2.hal_srng,
  3407. RXDMA_BUF);
  3408. if (soc->cdp_soc.ol_ops->
  3409. is_hw_dbs_2x2_capable) {
  3410. dbs_enable = soc->cdp_soc.ol_ops->
  3411. is_hw_dbs_2x2_capable(soc->ctrl_psoc);
  3412. }
  3413. if (dbs_enable) {
  3414. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3415. QDF_TRACE_LEVEL_ERROR,
  3416. FL("DBS enabled max_mac_rings %d"),
  3417. max_mac_rings);
  3418. } else {
  3419. max_mac_rings = 1;
  3420. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3421. QDF_TRACE_LEVEL_ERROR,
  3422. FL("DBS disabled, max_mac_rings %d"),
  3423. max_mac_rings);
  3424. }
  3425. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3426. FL("pdev_id %d max_mac_rings %d"),
  3427. pdev->pdev_id, max_mac_rings);
  3428. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  3429. int mac_for_pdev = dp_get_mac_id_for_pdev(
  3430. mac_id, pdev->pdev_id);
  3431. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3432. QDF_TRACE_LEVEL_ERROR,
  3433. FL("mac_id %d"), mac_for_pdev);
  3434. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3435. pdev->rx_mac_buf_ring[mac_id]
  3436. .hal_srng,
  3437. RXDMA_BUF);
  3438. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3439. pdev->rxdma_err_dst_ring[mac_id]
  3440. .hal_srng,
  3441. RXDMA_DST);
  3442. /* Configure monitor mode rings */
  3443. status = dp_mon_htt_srng_setup(soc, pdev,
  3444. mac_id,
  3445. mac_for_pdev);
  3446. if (status != QDF_STATUS_SUCCESS) {
  3447. dp_err("Failed to send htt monitor messages to target");
  3448. return status;
  3449. }
  3450. }
  3451. }
  3452. }
  3453. /*
  3454. * Timer to reap rxdma status rings.
  3455. * Needed until we enable ppdu end interrupts
  3456. */
  3457. qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
  3458. dp_service_mon_rings, (void *)soc,
  3459. QDF_TIMER_TYPE_WAKE_APPS);
  3460. soc->reap_timer_init = 1;
  3461. return status;
  3462. }
  3463. #else
  3464. /* This is only for WIN */
  3465. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3466. {
  3467. int i;
  3468. int mac_id;
  3469. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3470. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3471. struct dp_pdev *pdev = soc->pdev_list[i];
  3472. if (pdev == NULL)
  3473. continue;
  3474. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3475. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
  3476. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3477. pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
  3478. #ifndef DISABLE_MON_CONFIG
  3479. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3480. pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
  3481. RXDMA_MONITOR_BUF);
  3482. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3483. pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
  3484. RXDMA_MONITOR_DST);
  3485. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3486. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  3487. RXDMA_MONITOR_STATUS);
  3488. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3489. pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
  3490. RXDMA_MONITOR_DESC);
  3491. #endif
  3492. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3493. pdev->rxdma_err_dst_ring[mac_id].hal_srng,
  3494. RXDMA_DST);
  3495. }
  3496. }
  3497. return status;
  3498. }
  3499. #endif
  3500. /*
  3501. * dp_soc_attach_target_wifi3() - SOC initialization in the target
  3502. * @cdp_soc: Opaque Datapath SOC handle
  3503. *
  3504. * Return: zero on success, non-zero on failure
  3505. */
  3506. static QDF_STATUS
  3507. dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
  3508. {
  3509. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  3510. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3511. htt_soc_attach_target(soc->htt_handle);
  3512. status = dp_rxdma_ring_config(soc);
  3513. if (status != QDF_STATUS_SUCCESS) {
  3514. dp_err("Failed to send htt srng setup messages to target");
  3515. return status;
  3516. }
  3517. DP_STATS_INIT(soc);
  3518. /* initialize work queue for stats processing */
  3519. qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
  3520. return QDF_STATUS_SUCCESS;
  3521. }
  3522. /*
  3523. * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3524. * @cdp_soc: Datapath SOC handle
  3525. */
  3526. static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
  3527. {
  3528. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  3529. return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
  3530. }
  3531. /*
  3532. * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3533. * @cdp_soc: Datapath SOC handle
3534. * @config: nss config
  3535. */
  3536. static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
  3537. {
  3538. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  3539. struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
  3540. wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
  3541. /*
  3542. * TODO: masked out based on the per offloaded radio
  3543. */
  3544. switch (config) {
  3545. case dp_nss_cfg_default:
  3546. break;
  3547. case dp_nss_cfg_dbdc:
  3548. case dp_nss_cfg_dbtc:
  3549. wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
  3550. wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
  3551. wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
  3552. wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
  3553. break;
  3554. default:
  3555. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3556. "Invalid offload config %d", config);
  3557. }
  3558. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3559. FL("nss-wifi<0> nss config is enabled"));
  3560. }
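/*
 * Illustrative sketch (not part of the original source): when the control
 * path selects full offload, host-side Tx descriptor pools are not needed,
 * so a caller going through the cdp ops to configure DBDC offload would
 * effectively do:
 *
 *	dp_soc_set_nss_cfg_wifi3(cdp_soc, dp_nss_cfg_dbdc);
 *	// -> num_tx_desc_pool, num_tx_ext_desc_pool, num_tx_desc,
 *	//    num_tx_ext_desc are all forced to 0 in wlan_cfg
 *
 * For dp_nss_cfg_first_radio / dp_nss_cfg_second_radio the pools are left
 * untouched here, since the host still transmits on the other radio.
 */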
  3561. /*
  3562. * dp_vdev_attach_wifi3() - attach txrx vdev
  3563. * @txrx_pdev: Datapath PDEV handle
  3564. * @vdev_mac_addr: MAC address of the virtual interface
  3565. * @vdev_id: VDEV Id
3566. * @op_mode: VDEV operating mode
  3567. *
  3568. * Return: DP VDEV handle on success, NULL on failure
  3569. */
  3570. static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
  3571. uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
  3572. {
  3573. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3574. struct dp_soc *soc = pdev->soc;
  3575. struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
  3576. if (!vdev) {
  3577. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3578. FL("DP VDEV memory allocation failed"));
  3579. goto fail0;
  3580. }
  3581. vdev->pdev = pdev;
  3582. vdev->vdev_id = vdev_id;
  3583. vdev->opmode = op_mode;
  3584. vdev->osdev = soc->osdev;
  3585. vdev->osif_rx = NULL;
  3586. vdev->osif_rsim_rx_decap = NULL;
  3587. vdev->osif_get_key = NULL;
  3588. vdev->osif_rx_mon = NULL;
  3589. vdev->osif_tx_free_ext = NULL;
  3590. vdev->osif_vdev = NULL;
  3591. vdev->delete.pending = 0;
  3592. vdev->safemode = 0;
  3593. vdev->drop_unenc = 1;
  3594. vdev->sec_type = cdp_sec_type_none;
  3595. #ifdef notyet
  3596. vdev->filters_num = 0;
  3597. #endif
  3598. qdf_mem_copy(
  3599. &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
  3600. /* TODO: Initialize default HTT meta data that will be used in
  3601. * TCL descriptors for packets transmitted from this VDEV
  3602. */
  3603. TAILQ_INIT(&vdev->peer_list);
  3604. if ((soc->intr_mode == DP_INTR_POLL) &&
  3605. wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
  3606. if ((pdev->vdev_count == 0) ||
  3607. (wlan_op_mode_monitor == vdev->opmode))
  3608. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  3609. }
  3610. if (wlan_op_mode_monitor == vdev->opmode) {
  3611. pdev->monitor_vdev = vdev;
  3612. return (struct cdp_vdev *)vdev;
  3613. }
  3614. vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  3615. vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  3616. vdev->dscp_tid_map_id = 0;
  3617. vdev->mcast_enhancement_en = 0;
  3618. vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
  3619. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3620. /* add this vdev into the pdev's list */
  3621. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  3622. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3623. pdev->vdev_count++;
  3624. dp_tx_vdev_attach(vdev);
  3625. if (pdev->vdev_count == 1)
  3626. dp_lro_hash_setup(soc, pdev);
  3627. /* LRO */
  3628. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  3629. wlan_op_mode_sta == vdev->opmode)
  3630. vdev->lro_enable = true;
  3631. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3632. "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
  3633. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3634. "Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
  3635. DP_STATS_INIT(vdev);
  3636. if (wlan_op_mode_sta == vdev->opmode)
  3637. dp_peer_create_wifi3((struct cdp_vdev *)vdev,
  3638. vdev->mac_addr.raw,
  3639. NULL);
  3640. return (struct cdp_vdev *)vdev;
  3641. fail0:
  3642. return NULL;
  3643. }
  3644. /**
  3645. * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3646. * @vdev_handle: Datapath VDEV handle
  3647. * @osif_vdev: OSIF vdev handle
  3648. * @ctrl_vdev: UMAC vdev handle
  3649. * @txrx_ops: Tx and Rx operations
  3650. *
3651. * Return: void
  3652. */
  3653. static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
  3654. void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
  3655. struct ol_txrx_ops *txrx_ops)
  3656. {
  3657. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3658. vdev->osif_vdev = osif_vdev;
  3659. vdev->ctrl_vdev = ctrl_vdev;
  3660. vdev->osif_rx = txrx_ops->rx.rx;
  3661. vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
  3662. vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
  3663. vdev->osif_get_key = txrx_ops->get_key;
  3664. vdev->osif_rx_mon = txrx_ops->rx.mon;
  3665. vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
  3666. #ifdef notyet
  3667. #if ATH_SUPPORT_WAPI
  3668. vdev->osif_check_wai = txrx_ops->rx.wai_check;
  3669. #endif
  3670. #endif
  3671. #ifdef UMAC_SUPPORT_PROXY_ARP
  3672. vdev->osif_proxy_arp = txrx_ops->proxy_arp;
  3673. #endif
  3674. vdev->me_convert = txrx_ops->me_convert;
  3675. /* TODO: Enable the following once Tx code is integrated */
  3676. if (vdev->mesh_vdev)
  3677. txrx_ops->tx.tx = dp_tx_send_mesh;
  3678. else
  3679. txrx_ops->tx.tx = dp_tx_send;
  3680. txrx_ops->tx.tx_exception = dp_tx_send_exception;
  3681. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  3682. "DP Vdev Register success");
  3683. }
  3684. /**
3685. * dp_vdev_flush_peers() - Forcibly flush peers of vdev
  3686. * @vdev: Datapath VDEV handle
  3687. *
  3688. * Return: void
  3689. */
  3690. static void dp_vdev_flush_peers(struct dp_vdev *vdev)
  3691. {
  3692. struct dp_pdev *pdev = vdev->pdev;
  3693. struct dp_soc *soc = pdev->soc;
  3694. struct dp_peer *peer;
  3695. uint16_t *peer_ids;
  3696. uint8_t i = 0, j = 0;
  3697. peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
  3698. if (!peer_ids) {
  3699. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3700. "DP alloc failure - unable to flush peers");
  3701. return;
  3702. }
  3703. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3704. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3705. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  3706. if (peer->peer_ids[i] != HTT_INVALID_PEER)
  3707. if (j < soc->max_peers)
  3708. peer_ids[j++] = peer->peer_ids[i];
  3709. }
  3710. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3711. for (i = 0; i < j ; i++)
  3712. dp_rx_peer_unmap_handler(soc, peer_ids[i], vdev->vdev_id,
  3713. NULL, 0);
  3714. qdf_mem_free(peer_ids);
  3715. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3716. FL("Flushed peers for vdev object %pK "), vdev);
  3717. }
  3718. /*
  3719. * dp_vdev_detach_wifi3() - Detach txrx vdev
  3720. * @txrx_vdev: Datapath VDEV handle
3721. * @callback: OL_IF callback invoked on completion of detach
  3722. * @cb_context: Callback context
  3723. *
  3724. */
  3725. static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
  3726. ol_txrx_vdev_delete_cb callback, void *cb_context)
  3727. {
  3728. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3729. struct dp_pdev *pdev = vdev->pdev;
  3730. struct dp_soc *soc = pdev->soc;
  3731. struct dp_neighbour_peer *peer = NULL;
  3732. /* preconditions */
  3733. qdf_assert(vdev);
  3734. if (wlan_op_mode_monitor == vdev->opmode)
  3735. goto free_vdev;
  3736. if (wlan_op_mode_sta == vdev->opmode)
  3737. dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3738. /*
3739. * If the target is hung, flush all peers before detaching the vdev;
3740. * this frees all references held due to missing unmap commands
3741. * from the target.
3742. */
  3743. if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
  3744. dp_vdev_flush_peers(vdev);
  3745. /*
  3746. * Use peer_ref_mutex while accessing peer_list, in case
  3747. * a peer is in the process of being removed from the list.
  3748. */
  3749. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3750. /* check that the vdev has no peers allocated */
  3751. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  3752. /* debug print - will be removed later */
  3753. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  3754. FL("not deleting vdev object %pK (%pM)"
  3755. "until deletion finishes for all its peers"),
  3756. vdev, vdev->mac_addr.raw);
  3757. /* indicate that the vdev needs to be deleted */
  3758. vdev->delete.pending = 1;
  3759. vdev->delete.callback = callback;
  3760. vdev->delete.context = cb_context;
  3761. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3762. return;
  3763. }
  3764. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3765. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  3766. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  3767. neighbour_peer_list_elem) {
  3768. QDF_ASSERT(peer->vdev != vdev);
  3769. }
  3770. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  3771. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3772. dp_tx_vdev_detach(vdev);
  3773. /* remove the vdev from its parent pdev's list */
  3774. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  3775. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3776. FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
  3777. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3778. free_vdev:
  3779. qdf_mem_free(vdev);
  3780. if (callback)
  3781. callback(cb_context);
  3782. }
  3783. /*
  3784. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3785. * @soc: datapath soc handle
3786. * @peer: datapath peer handle
  3787. *
  3788. * Delete the AST entries belonging to a peer
  3789. */
  3790. #ifdef FEATURE_AST
  3791. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  3792. struct dp_peer *peer)
  3793. {
  3794. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  3795. qdf_spin_lock_bh(&soc->ast_lock);
  3796. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  3797. dp_peer_del_ast(soc, ast_entry);
  3798. peer->self_ast_entry = NULL;
  3799. TAILQ_INIT(&peer->ast_entry_list);
  3800. qdf_spin_unlock_bh(&soc->ast_lock);
  3801. }
  3802. #else
  3803. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  3804. struct dp_peer *peer)
  3805. {
  3806. }
  3807. #endif
  3808. #if ATH_SUPPORT_WRAP
  3809. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  3810. uint8_t *peer_mac_addr)
  3811. {
  3812. struct dp_peer *peer;
  3813. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  3814. 0, vdev->vdev_id);
  3815. if (!peer)
  3816. return NULL;
  3817. if (peer->bss_peer)
  3818. return peer;
  3819. dp_peer_unref_delete(peer);
  3820. return NULL;
  3821. }
  3822. #else
  3823. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  3824. uint8_t *peer_mac_addr)
  3825. {
  3826. struct dp_peer *peer;
  3827. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  3828. 0, vdev->vdev_id);
  3829. if (!peer)
  3830. return NULL;
  3831. if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
  3832. return peer;
  3833. dp_peer_unref_delete(peer);
  3834. return NULL;
  3835. }
  3836. #endif
  3837. #if defined(FEATURE_AST)
  3838. #if !defined(AST_HKV1_WORKAROUND)
  3839. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  3840. uint8_t *peer_mac_addr)
  3841. {
  3842. struct dp_ast_entry *ast_entry;
  3843. qdf_spin_lock_bh(&soc->ast_lock);
  3844. ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
  3845. if (ast_entry && ast_entry->next_hop)
  3846. dp_peer_del_ast(soc, ast_entry);
  3847. qdf_spin_unlock_bh(&soc->ast_lock);
  3848. }
  3849. #else
  3850. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  3851. uint8_t *peer_mac_addr)
  3852. {
  3853. struct dp_ast_entry *ast_entry;
  3854. if (soc->ast_override_support) {
  3855. qdf_spin_lock_bh(&soc->ast_lock);
  3856. ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
  3857. if (ast_entry && ast_entry->next_hop)
  3858. dp_peer_del_ast(soc, ast_entry);
  3859. qdf_spin_unlock_bh(&soc->ast_lock);
  3860. }
  3861. }
  3862. #endif
  3863. #else
  3864. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  3865. uint8_t *peer_mac_addr)
  3866. {
  3867. }
  3868. #endif
  3869. /*
  3870. * dp_peer_create_wifi3() - attach txrx peer
  3871. * @txrx_vdev: Datapath VDEV handle
  3872. * @peer_mac_addr: Peer MAC address
3873. * @ctrl_peer: UMAC peer handle
3874. * Return: DP peer handle on success, NULL on failure
  3875. */
  3876. static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
  3877. uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
  3878. {
  3879. struct dp_peer *peer;
  3880. int i;
  3881. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3882. struct dp_pdev *pdev;
  3883. struct dp_soc *soc;
  3884. enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
  3885. /* preconditions */
  3886. qdf_assert(vdev);
  3887. qdf_assert(peer_mac_addr);
  3888. pdev = vdev->pdev;
  3889. soc = pdev->soc;
  3890. /*
  3891. * If a peer entry with given MAC address already exists,
  3892. * reuse the peer and reset the state of peer.
  3893. */
  3894. peer = dp_peer_can_reuse(vdev, peer_mac_addr);
  3895. if (peer) {
  3896. qdf_atomic_init(&peer->is_default_route_set);
  3897. dp_peer_cleanup(vdev, peer);
  3898. peer->delete_in_progress = false;
  3899. dp_peer_delete_ast_entries(soc, peer);
  3900. if ((vdev->opmode == wlan_op_mode_sta) &&
  3901. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  3902. DP_MAC_ADDR_LEN)) {
  3903. ast_type = CDP_TXRX_AST_TYPE_SELF;
  3904. }
  3905. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3906. /*
3907. * The control path maintains a node count which is incremented
3908. * for every new peer create command. Since a new peer is not being
3909. * created and the earlier reference is reused here, a
3910. * peer_unref_delete event is sent to the control path to
3911. * increment the count back.
3912. */
  3913. if (soc->cdp_soc.ol_ops->peer_unref_delete) {
  3914. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
  3915. vdev->vdev_id, peer->mac_addr.raw);
  3916. }
  3917. peer->ctrl_peer = ctrl_peer;
  3918. dp_local_peer_id_alloc(pdev, peer);
  3919. DP_STATS_INIT(peer);
  3920. return (void *)peer;
  3921. } else {
3922. /*
3923. * When a STA roams from a repeater AP to the root AP and vice
3924. * versa, we need to remove the AST entry which was earlier added
3925. * as a WDS entry.
3926. * If an AST entry exists but no peer entry exists with the given
3927. * MAC address, we can deduce it is a WDS entry.
3928. */
  3929. dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
  3930. }
  3931. #ifdef notyet
  3932. peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
  3933. soc->mempool_ol_ath_peer);
  3934. #else
  3935. peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
  3936. #endif
  3937. if (!peer)
  3938. return NULL; /* failure */
  3939. qdf_mem_zero(peer, sizeof(struct dp_peer));
  3940. TAILQ_INIT(&peer->ast_entry_list);
  3941. /* store provided params */
  3942. peer->vdev = vdev;
  3943. peer->ctrl_peer = ctrl_peer;
  3944. if ((vdev->opmode == wlan_op_mode_sta) &&
  3945. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  3946. DP_MAC_ADDR_LEN)) {
  3947. ast_type = CDP_TXRX_AST_TYPE_SELF;
  3948. }
  3949. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  3950. qdf_spinlock_create(&peer->peer_info_lock);
  3951. qdf_mem_copy(
  3952. &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3953. /* TODO: See if rx_opt_proc is really required */
  3954. peer->rx_opt_proc = soc->rx_opt_proc;
  3955. /* initialize the peer_id */
  3956. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  3957. peer->peer_ids[i] = HTT_INVALID_PEER;
  3958. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3959. qdf_atomic_init(&peer->ref_cnt);
  3960. /* keep one reference for attach */
  3961. qdf_atomic_inc(&peer->ref_cnt);
  3962. /* add this peer into the vdev's list */
  3963. if (wlan_op_mode_sta == vdev->opmode)
  3964. TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
  3965. else
  3966. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  3967. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3968. /* TODO: See if hash based search is required */
  3969. dp_peer_find_hash_add(soc, peer);
  3970. /* Initialize the peer state */
  3971. peer->state = OL_TXRX_PEER_STATE_DISC;
  3972. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3973. "vdev %pK created peer %pK (%pM) ref_cnt: %d",
  3974. vdev, peer, peer->mac_addr.raw,
  3975. qdf_atomic_read(&peer->ref_cnt));
3976. /*
3977. * For every peer map message, check and set bss_peer if the peer MAC matches the vdev MAC
3978. */
  3979. if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
  3980. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3981. "vdev bss_peer!!!!");
  3982. peer->bss_peer = 1;
  3983. vdev->vap_bss_peer = peer;
  3984. }
  3985. for (i = 0; i < DP_MAX_TIDS; i++)
  3986. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  3987. dp_local_peer_id_alloc(pdev, peer);
  3988. DP_STATS_INIT(peer);
  3989. return (void *)peer;
  3990. }
  3991. /*
  3992. * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
  3993. * @vdev: Datapath VDEV handle
  3994. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  3995. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  3996. *
  3997. * Return: None
  3998. */
  3999. static
  4000. void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
  4001. enum cdp_host_reo_dest_ring *reo_dest,
  4002. bool *hash_based)
  4003. {
  4004. struct dp_soc *soc;
  4005. struct dp_pdev *pdev;
  4006. pdev = vdev->pdev;
  4007. soc = pdev->soc;
  4008. /*
  4009. * hash based steering is disabled for Radios which are offloaded
  4010. * to NSS
  4011. */
  4012. if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
  4013. *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4014. /*
4015. * The line below ensures the proper reo_dest ring is chosen for
4016. * cases where a toeplitz hash cannot be generated (e.g. non TCP/UDP)
4017. */
  4018. *reo_dest = pdev->reo_dest;
  4019. }
  4020. #ifdef IPA_OFFLOAD
  4021. /*
  4022. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4023. * @vdev: Datapath VDEV handle
  4024. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4025. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4026. *
4027. * If IPA is enabled in the ini, disable hash-based steering for SAP
4028. * mode and use the default reo_dest ring for RX. Use config values for other modes.
  4029. * Return: None
  4030. */
  4031. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4032. enum cdp_host_reo_dest_ring *reo_dest,
  4033. bool *hash_based)
  4034. {
  4035. struct dp_soc *soc;
  4036. struct dp_pdev *pdev;
  4037. pdev = vdev->pdev;
  4038. soc = pdev->soc;
  4039. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4040. /*
  4041. * If IPA is enabled, disable hash-based flow steering and set
  4042. * reo_dest_ring_4 as the REO ring to receive packets on.
  4043. * IPA is configured to reap reo_dest_ring_4.
  4044. *
4045. * Note - REO DST indexes are from 0 - 3, while the
4046. * cdp_host_reo_dest_ring enum values are from 1 - 4.
  4047. * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
  4048. */
  4049. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  4050. if (vdev->opmode == wlan_op_mode_ap) {
  4051. *reo_dest = IPA_REO_DEST_RING_IDX + 1;
  4052. *hash_based = 0;
  4053. }
  4054. }
  4055. }
  4056. #else
  4057. /*
  4058. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4059. * @vdev: Datapath VDEV handle
  4060. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4061. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4062. *
  4063. * Use system config values for hash based steering.
  4064. * Return: None
  4065. */
  4066. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4067. enum cdp_host_reo_dest_ring *reo_dest,
  4068. bool *hash_based)
  4069. {
  4070. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4071. }
  4072. #endif /* IPA_OFFLOAD */
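/*
 * Illustrative sketch (not compiled): the REO destination selection above
 * maps the 0-based hardware ring index to the 1-based
 * cdp_host_reo_dest_ring enum. With IPA enabled and an AP vdev:
 *
 *   *reo_dest   = IPA_REO_DEST_RING_IDX + 1;  // reo_dest_ring_4
 *   *hash_based = 0;                          // toeplitz steering off
 *
 * For every other mode the pdev default (pdev->reo_dest) and the
 * wlan_cfg_is_rx_hash_enabled() setting are used unchanged.
 */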
  4073. /*
  4074. * dp_peer_setup_wifi3() - initialize the peer
  4075. * @vdev_hdl: virtual device object
4076. * @peer_hdl: Peer object
  4077. *
  4078. * Return: void
  4079. */
  4080. static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  4081. {
  4082. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  4083. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  4084. struct dp_pdev *pdev;
  4085. struct dp_soc *soc;
  4086. bool hash_based = 0;
  4087. enum cdp_host_reo_dest_ring reo_dest;
  4088. /* preconditions */
  4089. qdf_assert(vdev);
  4090. qdf_assert(peer);
  4091. pdev = vdev->pdev;
  4092. soc = pdev->soc;
  4093. peer->last_assoc_rcvd = 0;
  4094. peer->last_disassoc_rcvd = 0;
  4095. peer->last_deauth_rcvd = 0;
  4096. dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
  4097. dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
  4098. pdev->pdev_id, vdev->vdev_id,
  4099. vdev->opmode, hash_based, reo_dest);
4100. /*
4101. * There are corner cases where AD1 = AD2 = the VAP's address,
4102. * i.e. both devices have the same MAC address. In these
4103. * cases we want such packets to be processed by the NULL queue
4104. * handler, which is the REO2TCL ring. For this reason we should
4105. * not set up reo queues and the default route for the bss_peer.
4106. */
  4107. if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
  4108. return;
  4109. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  4110. /* TODO: Check the destination ring number to be passed to FW */
  4111. soc->cdp_soc.ol_ops->peer_set_default_routing(
  4112. pdev->ctrl_pdev, peer->mac_addr.raw,
  4113. peer->vdev->vdev_id, hash_based, reo_dest);
  4114. }
  4115. qdf_atomic_set(&peer->is_default_route_set, 1);
  4116. dp_peer_rx_init(pdev, peer);
  4117. return;
  4118. }
  4119. /*
  4120. * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
  4121. * @vdev_handle: virtual device object
  4122. * @htt_pkt_type: type of pkt
  4123. *
  4124. * Return: void
  4125. */
  4126. static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
  4127. enum htt_cmn_pkt_type val)
  4128. {
  4129. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4130. vdev->tx_encap_type = val;
  4131. }
  4132. /*
  4133. * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
  4134. * @vdev_handle: virtual device object
  4135. * @htt_pkt_type: type of pkt
  4136. *
  4137. * Return: void
  4138. */
  4139. static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
  4140. enum htt_cmn_pkt_type val)
  4141. {
  4142. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4143. vdev->rx_decap_type = val;
  4144. }
  4145. /*
  4146. * dp_set_ba_aging_timeout() - set ba aging timeout per AC
  4147. * @txrx_soc: cdp soc handle
  4148. * @ac: Access category
  4149. * @value: timeout value in millisec
  4150. *
  4151. * Return: void
  4152. */
  4153. static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  4154. uint8_t ac, uint32_t value)
  4155. {
  4156. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  4157. hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
  4158. }
  4159. /*
  4160. * dp_get_ba_aging_timeout() - get ba aging timeout per AC
  4161. * @txrx_soc: cdp soc handle
  4162. * @ac: access category
  4163. * @value: timeout value in millisec
  4164. *
  4165. * Return: void
  4166. */
  4167. static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  4168. uint8_t ac, uint32_t *value)
  4169. {
  4170. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  4171. hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
  4172. }
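/*
 * Illustrative sketch (not compiled): the BA aging timeout is programmed
 * and read back per access category; both helpers simply forward to
 * hal_set_ba_aging_timeout()/hal_get_ba_aging_timeout() on hal_soc.
 * A hypothetical caller (AC value 0 assumed to mean best effort):
 *
 *   uint32_t timeout_ms;
 *
 *   dp_set_ba_aging_timeout(txrx_soc, 0, 1000);        // 1000 ms for AC 0
 *   dp_get_ba_aging_timeout(txrx_soc, 0, &timeout_ms); // reads back 1000
 */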
  4173. /*
  4174. * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
  4175. * @pdev_handle: physical device object
  4176. * @val: reo destination ring index (1 - 4)
  4177. *
  4178. * Return: void
  4179. */
  4180. static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
  4181. enum cdp_host_reo_dest_ring val)
  4182. {
  4183. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4184. if (pdev)
  4185. pdev->reo_dest = val;
  4186. }
  4187. /*
  4188. * dp_get_pdev_reo_dest() - get the reo destination for this pdev
  4189. * @pdev_handle: physical device object
  4190. *
  4191. * Return: reo destination ring index
  4192. */
  4193. static enum cdp_host_reo_dest_ring
  4194. dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
  4195. {
  4196. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4197. if (pdev)
  4198. return pdev->reo_dest;
  4199. else
  4200. return cdp_host_reo_dest_ring_unknown;
  4201. }
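/*
 * Illustrative sketch (not compiled): the per-pdev REO destination can be
 * overridden and queried through the pair above, e.g.:
 *
 *   dp_set_pdev_reo_dest(pdev_handle, val);    // val in the 1 - 4 range
 *   if (dp_get_pdev_reo_dest(pdev_handle) == cdp_host_reo_dest_ring_unknown)
 *           // pdev_handle was NULL
 *
 * The new value takes effect for peers set up after the change, since it
 * is consumed by dp_vdev_get_default_reo_hash() during peer setup.
 */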
  4202. /*
  4203. * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
  4204. * @pdev_handle: device object
  4205. * @val: value to be set
  4206. *
4207. * Return: 0 on success
  4208. */
  4209. static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  4210. uint32_t val)
  4211. {
  4212. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4213. /* Enable/Disable smart mesh filtering. This flag will be checked
  4214. * during rx processing to check if packets are from NAC clients.
  4215. */
  4216. pdev->filter_neighbour_peers = val;
  4217. return 0;
  4218. }
  4219. /*
  4220. * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
  4221. * address for smart mesh filtering
  4222. * @vdev_handle: virtual device object
  4223. * @cmd: Add/Del command
  4224. * @macaddr: nac client mac address
  4225. *
4226. * Return: 1 on success, 0 on failure
  4227. */
  4228. static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
  4229. uint32_t cmd, uint8_t *macaddr)
  4230. {
  4231. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4232. struct dp_pdev *pdev = vdev->pdev;
  4233. struct dp_neighbour_peer *peer = NULL;
  4234. if (!macaddr)
  4235. goto fail0;
  4236. /* Store address of NAC (neighbour peer) which will be checked
  4237. * against TA of received packets.
  4238. */
  4239. if (cmd == DP_NAC_PARAM_ADD) {
  4240. peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
  4241. sizeof(*peer));
  4242. if (!peer) {
  4243. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4244. FL("DP neighbour peer node memory allocation failed"));
  4245. goto fail0;
  4246. }
  4247. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  4248. macaddr, DP_MAC_ADDR_LEN);
  4249. peer->vdev = vdev;
  4250. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4251. /* add this neighbour peer into the list */
  4252. TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
  4253. neighbour_peer_list_elem);
  4254. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4255. /* first neighbour */
  4256. if (!pdev->neighbour_peers_added) {
  4257. pdev->neighbour_peers_added = true;
  4258. dp_ppdu_ring_cfg(pdev);
  4259. }
  4260. return 1;
  4261. } else if (cmd == DP_NAC_PARAM_DEL) {
  4262. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4263. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  4264. neighbour_peer_list_elem) {
  4265. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  4266. macaddr, DP_MAC_ADDR_LEN)) {
  4267. /* delete this peer from the list */
  4268. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  4269. peer, neighbour_peer_list_elem);
  4270. qdf_mem_free(peer);
  4271. break;
  4272. }
  4273. }
  4274. /* last neighbour deleted */
  4275. if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
  4276. pdev->neighbour_peers_added = false;
  4277. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4278. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  4279. !pdev->enhanced_stats_en)
  4280. dp_ppdu_ring_reset(pdev);
  4281. return 1;
  4282. }
  4283. fail0:
  4284. return 0;
  4285. }
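/*
 * Illustrative sketch (not compiled): smart-mesh NAC filtering is driven
 * by the two calls above. A hypothetical control path might do:
 *
 *   uint8_t nac_mac[DP_MAC_ADDR_LEN];   // neighbour (NAC) client MAC
 *
 *   dp_set_filter_neighbour_peers(pdev_handle, 1);               // enable
 *   dp_update_filter_neighbour_peers(vdev_handle,
 *                                    DP_NAC_PARAM_ADD, nac_mac); // add NAC
 *   ...
 *   dp_update_filter_neighbour_peers(vdev_handle,
 *                                    DP_NAC_PARAM_DEL, nac_mac); // remove
 *
 * The first ADD also configures the ppdu ring via dp_ppdu_ring_cfg(); once
 * the last neighbour is deleted the ring is reset again, unless mcopy mode
 * or enhanced stats keep it active.
 */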
  4286. /*
  4287. * dp_get_sec_type() - Get the security type
  4288. * @peer: Datapath peer handle
  4289. * @sec_idx: Security id (mcast, ucast)
  4290. *
4291. * Return: sec_type - security type of the peer
  4292. */
  4293. static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
  4294. {
  4295. struct dp_peer *dpeer = (struct dp_peer *)peer;
  4296. return dpeer->security[sec_idx].sec_type;
  4297. }
  4298. /*
  4299. * dp_peer_authorize() - authorize txrx peer
  4300. * @peer_handle: Datapath peer handle
4301. * @authorize: flag to authorize (non-zero) or deauthorize (0) the peer
4302. * Return: void
  4303. */
  4304. static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
  4305. {
  4306. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4307. struct dp_soc *soc;
  4308. if (peer != NULL) {
  4309. soc = peer->vdev->pdev->soc;
  4310. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4311. peer->authorize = authorize ? 1 : 0;
  4312. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4313. }
  4314. }
  4315. static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
  4316. struct dp_pdev *pdev,
  4317. struct dp_peer *peer,
  4318. uint32_t vdev_id)
  4319. {
  4320. struct dp_vdev *vdev = NULL;
  4321. struct dp_peer *bss_peer = NULL;
  4322. uint8_t *m_addr = NULL;
  4323. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4324. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  4325. if (vdev->vdev_id == vdev_id)
  4326. break;
  4327. }
  4328. if (!vdev) {
  4329. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4330. "vdev is NULL");
  4331. } else {
  4332. if (vdev->vap_bss_peer == peer)
  4333. vdev->vap_bss_peer = NULL;
  4334. m_addr = peer->mac_addr.raw;
  4335. if (soc->cdp_soc.ol_ops->peer_unref_delete)
  4336. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
  4337. vdev_id, m_addr);
  4338. if (vdev && vdev->vap_bss_peer) {
  4339. bss_peer = vdev->vap_bss_peer;
  4340. DP_UPDATE_STATS(vdev, peer);
  4341. }
  4342. }
  4343. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4344. qdf_mem_free(peer);
  4345. }
  4346. /**
  4347. * dp_delete_pending_vdev() - check and process vdev delete
  4348. * @pdev: DP specific pdev pointer
  4349. * @vdev: DP specific vdev pointer
  4350. * @vdev_id: vdev id corresponding to vdev
  4351. *
  4352. * This API does following:
4353. * 1) It releases tx flow pool buffers as the vdev is
4354. * going down and no peers are associated.
4355. * 2) It also detaches the vdev before freeing the vdev (struct dp_vdev) memory
  4356. */
  4357. static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
  4358. uint8_t vdev_id)
  4359. {
  4360. ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
  4361. void *vdev_delete_context = NULL;
  4362. vdev_delete_cb = vdev->delete.callback;
  4363. vdev_delete_context = vdev->delete.context;
  4364. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4365. FL("deleting vdev object %pK (%pM)- its last peer is done"),
  4366. vdev, vdev->mac_addr.raw);
  4367. /* all peers are gone, go ahead and delete it */
  4368. dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
  4369. FLOW_TYPE_VDEV, vdev_id);
  4370. dp_tx_vdev_detach(vdev);
  4371. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4372. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  4373. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4374. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4375. FL("deleting vdev object %pK (%pM)"),
  4376. vdev, vdev->mac_addr.raw);
  4377. qdf_mem_free(vdev);
  4378. vdev = NULL;
  4379. if (vdev_delete_cb)
  4380. vdev_delete_cb(vdev_delete_context);
  4381. }
  4382. /*
  4383. * dp_peer_unref_delete() - unref and delete peer
  4384. * @peer_handle: Datapath peer handle
  4385. *
  4386. */
  4387. void dp_peer_unref_delete(void *peer_handle)
  4388. {
  4389. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4390. struct dp_vdev *vdev = peer->vdev;
  4391. struct dp_pdev *pdev = vdev->pdev;
  4392. struct dp_soc *soc = pdev->soc;
  4393. struct dp_peer *tmppeer;
  4394. int found = 0;
  4395. uint16_t peer_id;
  4396. uint16_t vdev_id;
  4397. bool delete_vdev;
  4398. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  4399. "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
  4400. peer, qdf_atomic_read(&peer->ref_cnt));
  4401. /*
  4402. * Hold the lock all the way from checking if the peer ref count
  4403. * is zero until the peer references are removed from the hash
  4404. * table and vdev list (if the peer ref count is zero).
  4405. * This protects against a new HL tx operation starting to use the
  4406. * peer object just after this function concludes it's done being used.
  4407. * Furthermore, the lock needs to be held while checking whether the
  4408. * vdev's list of peers is empty, to make sure that list is not modified
  4409. * concurrently with the empty check.
  4410. */
  4411. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4412. if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
  4413. peer_id = peer->peer_ids[0];
  4414. vdev_id = vdev->vdev_id;
  4415. /*
  4416. * Make sure that the reference to the peer in
  4417. * peer object map is removed
  4418. */
  4419. if (peer_id != HTT_INVALID_PEER)
  4420. soc->peer_id_to_obj_map[peer_id] = NULL;
  4421. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4422. "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
  4423. /* remove the reference to the peer from the hash table */
  4424. dp_peer_find_hash_remove(soc, peer);
  4425. qdf_spin_lock_bh(&soc->ast_lock);
  4426. if (peer->self_ast_entry) {
  4427. dp_peer_del_ast(soc, peer->self_ast_entry);
  4428. peer->self_ast_entry = NULL;
  4429. }
  4430. qdf_spin_unlock_bh(&soc->ast_lock);
  4431. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  4432. if (tmppeer == peer) {
  4433. found = 1;
  4434. break;
  4435. }
  4436. }
  4437. if (found) {
  4438. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  4439. peer_list_elem);
  4440. } else {
  4441. /*Ignoring the remove operation as peer not found*/
  4442. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4443. "peer:%pK not found in vdev:%pK peerlist:%pK",
  4444. peer, vdev, &peer->vdev->peer_list);
  4445. }
  4446. /* cleanup the peer data */
  4447. dp_peer_cleanup(vdev, peer);
  4448. /* check whether the parent vdev has no peers left */
  4449. if (TAILQ_EMPTY(&vdev->peer_list)) {
  4450. /*
  4451. * capture vdev delete pending flag's status
  4452. * while holding peer_ref_mutex lock
  4453. */
  4454. delete_vdev = vdev->delete.pending;
  4455. /*
  4456. * Now that there are no references to the peer, we can
  4457. * release the peer reference lock.
  4458. */
  4459. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4460. /*
  4461. * Check if the parent vdev was waiting for its peers
  4462. * to be deleted, in order for it to be deleted too.
  4463. */
  4464. if (delete_vdev)
  4465. dp_delete_pending_vdev(pdev, vdev, vdev_id);
  4466. } else {
  4467. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4468. }
  4469. dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
  4470. } else {
  4471. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4472. }
  4473. }
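/*
 * Illustrative sketch (not compiled): callers that take a temporary peer
 * reference pair it with dp_peer_unref_delete(), as done in
 * dp_aggregate_pdev_ctrl_frames_stats() below:
 *
 *   qdf_atomic_inc(&peer->ref_cnt);   // take a temporary reference
 *   ...                               // use the peer safely
 *   dp_peer_unref_delete(peer);       // drop it; last drop frees the peer
 *
 * On the final reference drop the peer is removed from the hash table and
 * the vdev peer list, its memory is released through
 * dp_reset_and_release_peer_mem(), and a pending vdev delete is completed
 * via dp_delete_pending_vdev() if this was the vdev's last peer.
 */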
  4474. /*
4475. * dp_peer_delete_wifi3() – Delete txrx peer
  4476. * @peer_handle: Datapath peer handle
  4477. * @bitmap: bitmap indicating special handling of request.
  4478. *
  4479. */
  4480. static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
  4481. {
  4482. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4483. /* redirect the peer's rx delivery function to point to a
  4484. * discard func
  4485. */
  4486. peer->rx_opt_proc = dp_rx_discard;
  4487. peer->ctrl_peer = NULL;
  4488. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4489. FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);
  4490. dp_local_peer_id_free(peer->vdev->pdev, peer);
  4491. qdf_spinlock_destroy(&peer->peer_info_lock);
  4492. /*
  4493. * Remove the reference added during peer_attach.
  4494. * The peer will still be left allocated until the
  4495. * PEER_UNMAP message arrives to remove the other
  4496. * reference, added by the PEER_MAP message.
  4497. */
  4498. dp_peer_unref_delete(peer_handle);
  4499. }
  4500. /*
4501. * dp_get_vdev_mac_addr_wifi3() – Get vdev MAC address
4502. * @pvdev: Datapath VDEV handle
4503. * Return: pointer to the vdev MAC address
  4504. */
  4505. static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
  4506. {
  4507. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  4508. return vdev->mac_addr.raw;
  4509. }
  4510. /*
4511. * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
4512. * @vdev_handle: DP VDEV handle
4513. * @val: WDS enable (1) / disable (0) value
  4514. *
  4515. * Return: none
  4516. */
  4517. static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
  4518. {
  4519. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4520. vdev->wds_enabled = val;
  4521. return 0;
  4522. }
  4523. /*
4524. * dp_get_vdev_from_vdev_id_wifi3() – Get vdev from vdev_id
4525. * @dev: Datapath PDEV handle
4526. * @vdev_id: vdev id
  4527. */
  4528. static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
  4529. uint8_t vdev_id)
  4530. {
  4531. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  4532. struct dp_vdev *vdev = NULL;
  4533. if (qdf_unlikely(!pdev))
  4534. return NULL;
  4535. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4536. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  4537. if (vdev->vdev_id == vdev_id)
  4538. break;
  4539. }
  4540. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4541. return (struct cdp_vdev *)vdev;
  4542. }
  4543. /*
  4544. * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
  4545. * @dev: PDEV handle
  4546. *
  4547. * Return: VDEV handle of monitor mode
  4548. */
  4549. static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
  4550. {
  4551. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  4552. if (qdf_unlikely(!pdev))
  4553. return NULL;
  4554. return (struct cdp_vdev *)pdev->monitor_vdev;
  4555. }
  4556. static int dp_get_opmode(struct cdp_vdev *vdev_handle)
  4557. {
  4558. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4559. return vdev->opmode;
  4560. }
  4561. static
  4562. void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
  4563. ol_txrx_rx_fp *stack_fn_p,
  4564. ol_osif_vdev_handle *osif_vdev_p)
  4565. {
  4566. struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
  4567. qdf_assert(vdev);
  4568. *stack_fn_p = vdev->osif_rx_stack;
  4569. *osif_vdev_p = vdev->osif_vdev;
  4570. }
  4571. static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
  4572. {
  4573. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  4574. struct dp_pdev *pdev = vdev->pdev;
  4575. return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
  4576. }
  4577. /**
  4578. * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
  4579. * ring based on target
  4580. * @soc: soc handle
  4581. * @mac_for_pdev: pdev_id
  4582. * @pdev: physical device handle
  4583. * @ring_num: mac id
  4584. * @htt_tlv_filter: tlv filter
  4585. *
  4586. * Return: zero on success, non-zero on failure
  4587. */
  4588. static inline
  4589. QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
  4590. struct dp_pdev *pdev, uint8_t ring_num,
  4591. struct htt_rx_ring_tlv_filter htt_tlv_filter)
  4592. {
  4593. QDF_STATUS status;
  4594. if (soc->wlan_cfg_ctx->rxdma1_enable)
  4595. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4596. pdev->rxdma_mon_buf_ring[ring_num]
  4597. .hal_srng,
  4598. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
  4599. &htt_tlv_filter);
  4600. else
  4601. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4602. pdev->rx_mac_buf_ring[ring_num]
  4603. .hal_srng,
  4604. RXDMA_BUF, RX_BUFFER_SIZE,
  4605. &htt_tlv_filter);
  4606. return status;
  4607. }
  4608. /**
  4609. * dp_reset_monitor_mode() - Disable monitor mode
  4610. * @pdev_handle: Datapath PDEV handle
  4611. *
  4612. * Return: 0 on success, not 0 on failure
  4613. */
  4614. static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
  4615. {
  4616. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4617. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4618. struct dp_soc *soc = pdev->soc;
  4619. uint8_t pdev_id;
  4620. int mac_id;
  4621. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4622. pdev_id = pdev->pdev_id;
  4623. soc = pdev->soc;
  4624. qdf_spin_lock_bh(&pdev->mon_lock);
  4625. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4626. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4627. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4628. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4629. pdev, mac_id,
  4630. htt_tlv_filter);
4631. if (status != QDF_STATUS_SUCCESS) {
4632. dp_err("Failed to send tlv filter for monitor mode rings");
qdf_spin_unlock_bh(&pdev->mon_lock);
4633. return status;
4634. }
  4635. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4636. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4637. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
  4638. &htt_tlv_filter);
  4639. }
  4640. pdev->monitor_vdev = NULL;
  4641. qdf_spin_unlock_bh(&pdev->mon_lock);
  4642. return QDF_STATUS_SUCCESS;
  4643. }
  4644. /**
  4645. * dp_set_nac() - set peer_nac
  4646. * @peer_handle: Datapath PEER handle
  4647. *
  4648. * Return: void
  4649. */
  4650. static void dp_set_nac(struct cdp_peer *peer_handle)
  4651. {
  4652. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4653. peer->nac = 1;
  4654. }
  4655. /**
  4656. * dp_get_tx_pending() - read pending tx
  4657. * @pdev_handle: Datapath PDEV handle
  4658. *
  4659. * Return: outstanding tx
  4660. */
  4661. static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
  4662. {
  4663. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4664. return qdf_atomic_read(&pdev->num_tx_outstanding);
  4665. }
  4666. /**
  4667. * dp_get_peer_mac_from_peer_id() - get peer mac
  4668. * @pdev_handle: Datapath PDEV handle
  4669. * @peer_id: Peer ID
  4670. * @peer_mac: MAC addr of PEER
  4671. *
  4672. * Return: void
  4673. */
  4674. static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
  4675. uint32_t peer_id, uint8_t *peer_mac)
  4676. {
  4677. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4678. struct dp_peer *peer;
  4679. if (pdev && peer_mac) {
  4680. peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
  4681. if (peer) {
  4682. qdf_mem_copy(peer_mac, peer->mac_addr.raw,
  4683. DP_MAC_ADDR_LEN);
  4684. dp_peer_unref_del_find_by_id(peer);
  4685. }
  4686. }
  4687. }
  4688. /**
  4689. * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
  4690. * @vdev_handle: Datapath VDEV handle
4691. * @smart_monitor: Flag to denote if it is smart monitor mode
  4692. *
  4693. * Return: 0 on success, not 0 on failure
  4694. */
  4695. static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
  4696. uint8_t smart_monitor)
  4697. {
4698. /* Many monitor VAPs can exist in a system but only one can be up at
4699. * any time
  4700. */
  4701. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4702. struct dp_pdev *pdev;
  4703. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4704. struct dp_soc *soc;
  4705. uint8_t pdev_id;
  4706. int mac_id;
  4707. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4708. qdf_assert(vdev);
  4709. pdev = vdev->pdev;
  4710. pdev_id = pdev->pdev_id;
  4711. soc = pdev->soc;
  4712. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  4713. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
  4714. pdev, pdev_id, soc, vdev);
  4715. /*Check if current pdev's monitor_vdev exists */
  4716. if (pdev->monitor_vdev) {
  4717. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  4718. "vdev=%pK", vdev);
  4719. qdf_assert(vdev);
  4720. }
  4721. pdev->monitor_vdev = vdev;
  4722. /* If smart monitor mode, do not configure monitor ring */
  4723. if (smart_monitor)
  4724. return QDF_STATUS_SUCCESS;
  4725. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  4726. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  4727. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  4728. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  4729. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  4730. pdev->mo_data_filter);
  4731. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4732. htt_tlv_filter.mpdu_start = 1;
  4733. htt_tlv_filter.msdu_start = 1;
  4734. htt_tlv_filter.packet = 1;
  4735. htt_tlv_filter.msdu_end = 1;
  4736. htt_tlv_filter.mpdu_end = 1;
  4737. htt_tlv_filter.packet_header = 1;
  4738. htt_tlv_filter.attention = 1;
  4739. htt_tlv_filter.ppdu_start = 0;
  4740. htt_tlv_filter.ppdu_end = 0;
  4741. htt_tlv_filter.ppdu_end_user_stats = 0;
  4742. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  4743. htt_tlv_filter.ppdu_end_status_done = 0;
  4744. htt_tlv_filter.header_per_msdu = 1;
  4745. htt_tlv_filter.enable_fp =
  4746. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  4747. htt_tlv_filter.enable_md = 0;
  4748. htt_tlv_filter.enable_mo =
  4749. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  4750. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  4751. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  4752. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  4753. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  4754. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  4755. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  4756. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4757. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4758. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4759. pdev, mac_id,
  4760. htt_tlv_filter);
  4761. if (status != QDF_STATUS_SUCCESS) {
  4762. dp_err("Failed to send tlv filter for monitor mode rings");
  4763. return status;
  4764. }
  4765. }
  4766. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4767. htt_tlv_filter.mpdu_start = 1;
  4768. htt_tlv_filter.msdu_start = 0;
  4769. htt_tlv_filter.packet = 0;
  4770. htt_tlv_filter.msdu_end = 0;
  4771. htt_tlv_filter.mpdu_end = 0;
  4772. htt_tlv_filter.attention = 0;
  4773. htt_tlv_filter.ppdu_start = 1;
  4774. htt_tlv_filter.ppdu_end = 1;
  4775. htt_tlv_filter.ppdu_end_user_stats = 1;
  4776. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  4777. htt_tlv_filter.ppdu_end_status_done = 1;
  4778. htt_tlv_filter.enable_fp = 1;
  4779. htt_tlv_filter.enable_md = 0;
  4780. htt_tlv_filter.enable_mo = 1;
  4781. if (pdev->mcopy_mode) {
  4782. htt_tlv_filter.packet_header = 1;
  4783. }
  4784. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  4785. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  4786. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  4787. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  4788. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  4789. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  4790. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4791. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  4792. pdev->pdev_id);
  4793. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4794. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4795. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  4796. }
  4797. return QDF_STATUS_SUCCESS;
  4798. }
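/*
 * Illustrative sketch (not compiled): monitor mode programs two filter
 * sets per mac ring. The destination/buffer ring carries full per-packet
 * TLVs (mpdu/msdu start and end, packet, attention) subject to the pdev
 * FP/MO filter mode, while the status ring carries only PPDU-level TLVs
 * (ppdu_start/end and user stats). Roughly:
 *
 *   // destination ring: per-packet TLVs, FP/MO per pdev filter settings
 *   dp_monitor_mode_ring_config(soc, mac_for_pdev, pdev, mac_id,
 *                               htt_tlv_filter);
 *
 *   // status ring: PPDU TLVs for all mgmt/ctrl/data subtypes
 *   htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
 *                       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
 *                       RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
 *                       &htt_tlv_filter);
 */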
  4799. /**
  4800. * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
  4801. * @pdev_handle: Datapath PDEV handle
  4802. * @filter_val: Flag to select Filter for monitor mode
  4803. * Return: 0 on success, not 0 on failure
  4804. */
  4805. static QDF_STATUS
  4806. dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
  4807. struct cdp_monitor_filter *filter_val)
  4808. {
4809. /* Many monitor VAPs can exist in a system but only one can be up at
4810. * any time
  4811. */
  4812. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4813. struct dp_vdev *vdev = pdev->monitor_vdev;
  4814. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4815. struct dp_soc *soc;
  4816. uint8_t pdev_id;
  4817. int mac_id;
  4818. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4819. pdev_id = pdev->pdev_id;
  4820. soc = pdev->soc;
  4821. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  4822. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
  4823. pdev, pdev_id, soc, vdev);
  4824. /*Check if current pdev's monitor_vdev exists */
  4825. if (!pdev->monitor_vdev) {
  4826. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  4827. "vdev=%pK", vdev);
  4828. qdf_assert(vdev);
  4829. }
  4830. /* update filter mode, type in pdev structure */
  4831. pdev->mon_filter_mode = filter_val->mode;
  4832. pdev->fp_mgmt_filter = filter_val->fp_mgmt;
  4833. pdev->fp_ctrl_filter = filter_val->fp_ctrl;
  4834. pdev->fp_data_filter = filter_val->fp_data;
  4835. pdev->mo_mgmt_filter = filter_val->mo_mgmt;
  4836. pdev->mo_ctrl_filter = filter_val->mo_ctrl;
  4837. pdev->mo_data_filter = filter_val->mo_data;
  4838. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  4839. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  4840. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  4841. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  4842. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  4843. pdev->mo_data_filter);
  4844. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4845. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4846. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4847. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4848. pdev, mac_id,
  4849. htt_tlv_filter);
  4850. if (status != QDF_STATUS_SUCCESS) {
  4851. dp_err("Failed to send tlv filter for monitor mode rings");
  4852. return status;
  4853. }
  4854. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4855. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4856. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  4857. }
  4858. htt_tlv_filter.mpdu_start = 1;
  4859. htt_tlv_filter.msdu_start = 1;
  4860. htt_tlv_filter.packet = 1;
  4861. htt_tlv_filter.msdu_end = 1;
  4862. htt_tlv_filter.mpdu_end = 1;
  4863. htt_tlv_filter.packet_header = 1;
  4864. htt_tlv_filter.attention = 1;
  4865. htt_tlv_filter.ppdu_start = 0;
  4866. htt_tlv_filter.ppdu_end = 0;
  4867. htt_tlv_filter.ppdu_end_user_stats = 0;
  4868. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  4869. htt_tlv_filter.ppdu_end_status_done = 0;
  4870. htt_tlv_filter.header_per_msdu = 1;
  4871. htt_tlv_filter.enable_fp =
  4872. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  4873. htt_tlv_filter.enable_md = 0;
  4874. htt_tlv_filter.enable_mo =
  4875. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  4876. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  4877. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  4878. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  4879. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  4880. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  4881. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  4882. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4883. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4884. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4885. pdev, mac_id,
  4886. htt_tlv_filter);
  4887. if (status != QDF_STATUS_SUCCESS) {
  4888. dp_err("Failed to send tlv filter for monitor mode rings");
  4889. return status;
  4890. }
  4891. }
  4892. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4893. htt_tlv_filter.mpdu_start = 1;
  4894. htt_tlv_filter.msdu_start = 0;
  4895. htt_tlv_filter.packet = 0;
  4896. htt_tlv_filter.msdu_end = 0;
  4897. htt_tlv_filter.mpdu_end = 0;
  4898. htt_tlv_filter.attention = 0;
  4899. htt_tlv_filter.ppdu_start = 1;
  4900. htt_tlv_filter.ppdu_end = 1;
  4901. htt_tlv_filter.ppdu_end_user_stats = 1;
  4902. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  4903. htt_tlv_filter.ppdu_end_status_done = 1;
  4904. htt_tlv_filter.enable_fp = 1;
  4905. htt_tlv_filter.enable_md = 0;
  4906. htt_tlv_filter.enable_mo = 1;
  4907. if (pdev->mcopy_mode) {
  4908. htt_tlv_filter.packet_header = 1;
  4909. }
  4910. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  4911. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  4912. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  4913. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  4914. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  4915. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  4916. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4917. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  4918. pdev->pdev_id);
  4919. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4920. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4921. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  4922. }
  4923. return QDF_STATUS_SUCCESS;
  4924. }
  4925. /**
  4926. * dp_get_pdev_id_frm_pdev() - get pdev_id
  4927. * @pdev_handle: Datapath PDEV handle
  4928. *
  4929. * Return: pdev_id
  4930. */
  4931. static
  4932. uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
  4933. {
  4934. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4935. return pdev->pdev_id;
  4936. }
  4937. /**
  4938. * dp_pdev_set_chan_noise_floor() - set channel noise floor
  4939. * @pdev_handle: Datapath PDEV handle
  4940. * @chan_noise_floor: Channel Noise Floor
  4941. *
  4942. * Return: void
  4943. */
  4944. static
  4945. void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
  4946. int16_t chan_noise_floor)
  4947. {
  4948. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4949. pdev->chan_noise_floor = chan_noise_floor;
  4950. }
  4951. /**
  4952. * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
  4953. * @vdev_handle: Datapath VDEV handle
  4954. * Return: true on ucast filter flag set
  4955. */
  4956. static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
  4957. {
  4958. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4959. struct dp_pdev *pdev;
  4960. pdev = vdev->pdev;
  4961. if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
  4962. (pdev->mo_data_filter & FILTER_DATA_UCAST))
  4963. return true;
  4964. return false;
  4965. }
  4966. /**
  4967. * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
  4968. * @vdev_handle: Datapath VDEV handle
  4969. * Return: true on mcast filter flag set
  4970. */
  4971. static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
  4972. {
  4973. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4974. struct dp_pdev *pdev;
  4975. pdev = vdev->pdev;
  4976. if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
  4977. (pdev->mo_data_filter & FILTER_DATA_MCAST))
  4978. return true;
  4979. return false;
  4980. }
  4981. /**
  4982. * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
  4983. * @vdev_handle: Datapath VDEV handle
  4984. * Return: true on non data filter flag set
  4985. */
  4986. static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
  4987. {
  4988. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4989. struct dp_pdev *pdev;
  4990. pdev = vdev->pdev;
  4991. if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
  4992. (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
  4993. if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
  4994. (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
  4995. return true;
  4996. }
  4997. }
  4998. return false;
  4999. }
  5000. #ifdef MESH_MODE_SUPPORT
  5001. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  5002. {
  5003. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5004. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5005. FL("val %d"), val);
  5006. vdev->mesh_vdev = val;
  5007. }
  5008. /*
  5009. * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
  5010. * @vdev_hdl: virtual device object
  5011. * @val: value to be set
  5012. *
  5013. * Return: void
  5014. */
  5015. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  5016. {
  5017. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5018. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5019. FL("val %d"), val);
  5020. vdev->mesh_rx_filter = val;
  5021. }
  5022. #endif
  5023. /*
5024. * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
  5025. * Current scope is bar received count
  5026. *
  5027. * @pdev_handle: DP_PDEV handle
  5028. *
  5029. * Return: void
  5030. */
  5031. #define STATS_PROC_TIMEOUT (HZ/1000)
  5032. static void
  5033. dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
  5034. {
  5035. struct dp_vdev *vdev;
  5036. struct dp_peer *peer;
  5037. uint32_t waitcnt;
  5038. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5039. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5040. if (!peer) {
  5041. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5042. FL("DP Invalid Peer refernce"));
  5043. return;
  5044. }
  5045. if (peer->delete_in_progress) {
  5046. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5047. FL("DP Peer deletion in progress"));
  5048. continue;
  5049. }
  5050. qdf_atomic_inc(&peer->ref_cnt);
  5051. waitcnt = 0;
  5052. dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
  5053. while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
  5054. && waitcnt < 10) {
  5055. schedule_timeout_interruptible(
  5056. STATS_PROC_TIMEOUT);
  5057. waitcnt++;
  5058. }
  5059. qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
  5060. dp_peer_unref_delete(peer);
  5061. }
  5062. }
  5063. }
/**
 * dp_rx_bar_stats_cb(): BAR received stats callback
 * @soc: SOC handle
 * @cb_ctxt: Call back context
 * @reo_status: Reo status
 *
 * Return: void
 */
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
                        union hal_reo_status *reo_status)
{
        struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
        struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

        if (!qdf_atomic_read(&soc->cmn_init_done))
                return;

        if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
                DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
                               queue_status->header.status);
                qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
                return;
        }

        pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
        qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
}
/**
 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the consolidated vdev stats
 *
 * Return: void
 */
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
                             struct cdp_vdev_stats *vdev_stats)
{
        struct dp_peer *peer = NULL;
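        /* Start from the vdev's own counters, then fold in each peer's stats */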
        qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));

        TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
                dp_update_vdev_stats(vdev_stats, peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
        dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
                             vdev_stats, vdev->vdev_id,
                             UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
}
/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
        struct dp_vdev *vdev = NULL;
        struct cdp_vdev_stats *vdev_stats =
                        qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

        if (!vdev_stats) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "DP alloc failure - unable to allocate vdev stats");
                return;
        }
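        /* Reset only the pdev tx, rx and tx-ingress blocks before
         * re-aggregating; other pdev counters are left untouched.
         */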
        qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
        qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
        qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);

        if (pdev->mcopy_mode)
                DP_UPDATE_STATS(pdev, pdev->invalid_peer);

        qdf_spin_lock_bh(&pdev->vdev_list_lock);
        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                dp_aggregate_vdev_stats(vdev, vdev_stats);
                dp_update_pdev_stats(pdev, vdev_stats);

                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
                DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
                DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
                DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
                DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
                DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
                DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
                DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.dropped_map_error);
                DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.dropped_self_mac);
                DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.dropped_send_fail);
                DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
                DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
                DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
                DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
                DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
                DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
                DP_STATS_AGGR(pdev, vdev, tx_i.dropped.headroom_insufficient);
                DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
                DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
                DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
                DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);

                pdev->stats.tx_i.dropped.dropped_pkt.num =
                        pdev->stats.tx_i.dropped.dma_error +
                        pdev->stats.tx_i.dropped.ring_full +
                        pdev->stats.tx_i.dropped.enqueue_fail +
                        pdev->stats.tx_i.dropped.desc_na.num +
                        pdev->stats.tx_i.dropped.res_full;

                pdev->stats.tx.last_ack_rssi =
                        vdev->stats.tx.last_ack_rssi;
                pdev->stats.tx_i.tso.num_seg =
                        vdev->stats.tx_i.tso.num_seg;
        }
        qdf_spin_unlock_bh(&pdev->vdev_list_lock);
        qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
        dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
                             pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}
/**
 * dp_vdev_getstats() - get vdev packet level stats
 * @vdev_handle: Datapath VDEV handle
 * @stats: cdp network device stats structure
 *
 * Return: void
 */
static void dp_vdev_getstats(void *vdev_handle,
                             struct cdp_dev_stats *stats)
{
        struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
        struct cdp_vdev_stats *vdev_stats =
                        qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

        if (!vdev_stats) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "DP alloc failure - unable to allocate vdev stats");
                return;
        }

        dp_aggregate_vdev_stats(vdev, vdev_stats);

        stats->tx_packets = vdev_stats->tx_i.rcvd.num;
        stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;

        stats->tx_errors = vdev_stats->tx.tx_failed +
                vdev_stats->tx_i.dropped.dropped_pkt.num;
        stats->tx_dropped = stats->tx_errors;

        stats->rx_packets = vdev_stats->rx.unicast.num +
                vdev_stats->rx.multicast.num +
                vdev_stats->rx.bcast.num;
        stats->rx_bytes = vdev_stats->rx.unicast.bytes +
                vdev_stats->rx.multicast.bytes +
                vdev_stats->rx.bcast.bytes;

        /* free the scratch buffer once the stats have been copied out */
        qdf_mem_free(vdev_stats);
}
/**
 * dp_pdev_getstats() - get pdev packet level stats
 * @pdev_handle: Datapath PDEV handle
 * @stats: cdp network device stats structure
 *
 * Return: void
 */
static void dp_pdev_getstats(void *pdev_handle,
                             struct cdp_dev_stats *stats)
{
        struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

        dp_aggregate_pdev_stats(pdev);

        stats->tx_packets = pdev->stats.tx_i.rcvd.num;
        stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;

        stats->tx_errors = pdev->stats.tx.tx_failed +
                pdev->stats.tx_i.dropped.dropped_pkt.num;
        stats->tx_dropped = stats->tx_errors;

        stats->rx_packets = pdev->stats.rx.unicast.num +
                pdev->stats.rx.multicast.num +
                pdev->stats.rx.bcast.num;
        stats->rx_bytes = pdev->stats.rx.unicast.bytes +
                pdev->stats.rx.multicast.bytes +
                pdev->stats.rx.bcast.bytes;
}
/**
 * dp_get_device_stats() - get interface level packet stats
 * @handle: device handle
 * @stats: cdp network device stats structure
 * @type: device type pdev/vdev
 *
 * Return: void
 */
static void dp_get_device_stats(void *handle,
                                struct cdp_dev_stats *stats, uint8_t type)
{
        switch (type) {
        case UPDATE_VDEV_STATS:
                dp_vdev_getstats(handle, stats);
                break;
        case UPDATE_PDEV_STATS:
                dp_pdev_getstats(handle, stats);
                break;
        default:
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "apstats cannot be updated for this input "
                          "type %d", type);
                break;
        }
}
/**
 * dp_print_pdev_tx_stats(): Print Pdev level TX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
static inline void
dp_print_pdev_tx_stats(struct dp_pdev *pdev)
{
        uint8_t index = 0;

        DP_PRINT_STATS("PDEV Tx Stats:\n");
        DP_PRINT_STATS("Received From Stack:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.rcvd.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx_i.rcvd.bytes);
        DP_PRINT_STATS("Processed:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.processed.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx_i.processed.bytes);
        DP_PRINT_STATS("Total Completions:");
        DP_PRINT_STATS(" Packets = %u",
                       pdev->stats.tx.comp_pkt.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx.comp_pkt.bytes);
        DP_PRINT_STATS("Successful Completions:");
        DP_PRINT_STATS(" Packets = %u",
                       pdev->stats.tx.tx_success.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx.tx_success.bytes);
        DP_PRINT_STATS("Dropped:");
        DP_PRINT_STATS(" Total = %d",
                       pdev->stats.tx_i.dropped.dropped_pkt.num);
        DP_PRINT_STATS(" Dma_map_error = %d",
                       pdev->stats.tx_i.dropped.dma_error);
        DP_PRINT_STATS(" Ring Full = %d",
                       pdev->stats.tx_i.dropped.ring_full);
        DP_PRINT_STATS(" Descriptor Not available = %d",
                       pdev->stats.tx_i.dropped.desc_na.num);
        DP_PRINT_STATS(" HW enqueue failed = %d",
                       pdev->stats.tx_i.dropped.enqueue_fail);
        DP_PRINT_STATS(" Resources Full = %d",
                       pdev->stats.tx_i.dropped.res_full);
        DP_PRINT_STATS(" FW removed Pkts = %u",
                       pdev->stats.tx.dropped.fw_rem.num);
        DP_PRINT_STATS(" FW removed bytes = %llu",
                       pdev->stats.tx.dropped.fw_rem.bytes);
        DP_PRINT_STATS(" FW removed transmitted = %d",
                       pdev->stats.tx.dropped.fw_rem_tx);
        DP_PRINT_STATS(" FW removed untransmitted = %d",
                       pdev->stats.tx.dropped.fw_rem_notx);
        DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d",
                       pdev->stats.tx.dropped.fw_reason1);
        DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d",
                       pdev->stats.tx.dropped.fw_reason2);
        DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d",
                       pdev->stats.tx.dropped.fw_reason3);
        DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
                       pdev->stats.tx.dropped.age_out);
        DP_PRINT_STATS(" headroom insufficient = %d",
                       pdev->stats.tx_i.dropped.headroom_insufficient);
        DP_PRINT_STATS(" Multicast:");
        DP_PRINT_STATS(" Packets: %u",
                       pdev->stats.tx.mcast.num);
        DP_PRINT_STATS(" Bytes: %llu",
                       pdev->stats.tx.mcast.bytes);
        DP_PRINT_STATS("Scatter Gather:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.sg.sg_pkt.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx_i.sg.sg_pkt.bytes);
        DP_PRINT_STATS(" Dropped By Host = %d",
                       pdev->stats.tx_i.sg.dropped_host.num);
        DP_PRINT_STATS(" Dropped By Target = %d",
                       pdev->stats.tx_i.sg.dropped_target);
        DP_PRINT_STATS("TSO:");
        DP_PRINT_STATS(" Number of Segments = %d",
                       pdev->stats.tx_i.tso.num_seg);
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.tso.tso_pkt.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx_i.tso.tso_pkt.bytes);
        DP_PRINT_STATS(" Dropped By Host = %d",
                       pdev->stats.tx_i.tso.dropped_host.num);
        DP_PRINT_STATS("Mcast Enhancement:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.mcast_en.mcast_pkt.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
        DP_PRINT_STATS(" Dropped: Map Errors = %d",
                       pdev->stats.tx_i.mcast_en.dropped_map_error);
        DP_PRINT_STATS(" Dropped: Self Mac = %d",
                       pdev->stats.tx_i.mcast_en.dropped_self_mac);
        DP_PRINT_STATS(" Dropped: Send Fail = %d",
                       pdev->stats.tx_i.mcast_en.dropped_send_fail);
        DP_PRINT_STATS(" Unicast sent = %d",
                       pdev->stats.tx_i.mcast_en.ucast);
        DP_PRINT_STATS("Raw:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.raw.raw_pkt.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx_i.raw.raw_pkt.bytes);
        DP_PRINT_STATS(" DMA map error = %d",
                       pdev->stats.tx_i.raw.dma_map_error);
        DP_PRINT_STATS("Reinjected:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.reinject_pkts.num);
        DP_PRINT_STATS(" Bytes = %llu\n",
                       pdev->stats.tx_i.reinject_pkts.bytes);
        DP_PRINT_STATS("Inspected:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.inspect_pkts.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx_i.inspect_pkts.bytes);
        DP_PRINT_STATS("Nawds Multicast:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.tx_i.nawds_mcast.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.tx_i.nawds_mcast.bytes);
        DP_PRINT_STATS("CCE Classified:");
        DP_PRINT_STATS(" CCE Classified Packets: %u",
                       pdev->stats.tx_i.cce_classified);
        DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
                       pdev->stats.tx_i.cce_classified_raw);
        DP_PRINT_STATS("Mesh stats:");
        DP_PRINT_STATS(" frames to firmware: %u",
                       pdev->stats.tx_i.mesh.exception_fw);
        DP_PRINT_STATS(" completions from fw: %u",
                       pdev->stats.tx_i.mesh.completion_fw);
        DP_PRINT_STATS("PPDU stats counter");
        for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
                DP_PRINT_STATS(" Tag[%d] = %llu", index,
                               pdev->stats.ppdu_stats_counter[index]);
        }
}
/**
 * dp_print_pdev_rx_stats(): Print Pdev level RX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
static inline void
dp_print_pdev_rx_stats(struct dp_pdev *pdev)
{
        DP_PRINT_STATS("PDEV Rx Stats:\n");
        DP_PRINT_STATS("Received From HW (Per Rx Ring):");
        DP_PRINT_STATS(" Packets = %d %d %d %d",
                       pdev->stats.rx.rcvd_reo[0].num,
                       pdev->stats.rx.rcvd_reo[1].num,
                       pdev->stats.rx.rcvd_reo[2].num,
                       pdev->stats.rx.rcvd_reo[3].num);
        DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
                       pdev->stats.rx.rcvd_reo[0].bytes,
                       pdev->stats.rx.rcvd_reo[1].bytes,
                       pdev->stats.rx.rcvd_reo[2].bytes,
                       pdev->stats.rx.rcvd_reo[3].bytes);
        DP_PRINT_STATS("Replenished:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.replenish.pkts.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.replenish.pkts.bytes);
        DP_PRINT_STATS(" Buffers Added To Freelist = %d",
                       pdev->stats.buf_freelist);
        DP_PRINT_STATS(" Low threshold intr = %d",
                       pdev->stats.replenish.low_thresh_intrs);
        DP_PRINT_STATS("Dropped:");
        DP_PRINT_STATS(" msdu_not_done = %d",
                       pdev->stats.dropped.msdu_not_done);
        DP_PRINT_STATS(" mon_rx_drop = %d",
                       pdev->stats.dropped.mon_rx_drop);
        DP_PRINT_STATS(" mec_drop = %d",
                       pdev->stats.rx.mec_drop.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.rx.mec_drop.bytes);
        DP_PRINT_STATS("Sent To Stack:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.rx.to_stack.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.rx.to_stack.bytes);
        DP_PRINT_STATS("Multicast/Broadcast:");
        DP_PRINT_STATS(" Packets = %d",
                       pdev->stats.rx.multicast.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       pdev->stats.rx.multicast.bytes);
        DP_PRINT_STATS("Errors:");
        DP_PRINT_STATS(" Rxdma Ring Un-initialized = %d",
                       pdev->stats.replenish.rxdma_err);
        DP_PRINT_STATS(" Desc Alloc Failed = %d",
                       pdev->stats.err.desc_alloc_fail);
        DP_PRINT_STATS(" IP checksum error = %d",
                       pdev->stats.err.ip_csum_err);
        DP_PRINT_STATS(" TCP/UDP checksum error = %d",
                       pdev->stats.err.tcp_udp_csum_err);

        /* Get bar_recv_cnt */
        dp_aggregate_pdev_ctrl_frames_stats(pdev);
        DP_PRINT_STATS("BAR Received Count = %d",
                       pdev->stats.rx.bar_recv_cnt);
}
/**
 * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
static inline void
dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
        struct cdp_pdev_mon_stats *rx_mon_stats;

        rx_mon_stats = &pdev->rx_mon_stats;

        DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");

        dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);

        DP_PRINT_STATS("status_ppdu_done_cnt = %d",
                       rx_mon_stats->status_ppdu_done);
        DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
                       rx_mon_stats->dest_ppdu_done);
        DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
                       rx_mon_stats->dest_mpdu_done);
        DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
                       rx_mon_stats->dest_mpdu_drop);
        DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
                       rx_mon_stats->dup_mon_linkdesc_cnt);
        DP_PRINT_STATS("dup_mon_buf_cnt = %d",
                       rx_mon_stats->dup_mon_buf_cnt);
}
/**
 * dp_print_soc_tx_stats(): Print SOC level stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
static inline void
dp_print_soc_tx_stats(struct dp_soc *soc)
{
        uint8_t desc_pool_id;

        soc->stats.tx.desc_in_use = 0;

        DP_PRINT_STATS("SOC Tx Stats:\n");

        for (desc_pool_id = 0;
             desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
             desc_pool_id++)
                soc->stats.tx.desc_in_use +=
                        soc->tx_desc[desc_pool_id].num_allocated;

        DP_PRINT_STATS("Tx Descriptors In Use = %d",
                       soc->stats.tx.desc_in_use);
        DP_PRINT_STATS("Invalid peer:");
        DP_PRINT_STATS(" Packets = %d",
                       soc->stats.tx.tx_invalid_peer.num);
        DP_PRINT_STATS(" Bytes = %llu",
                       soc->stats.tx.tx_invalid_peer.bytes);
        DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
                       soc->stats.tx.tcl_ring_full[0],
                       soc->stats.tx.tcl_ring_full[1],
                       soc->stats.tx.tcl_ring_full[2]);
}
/**
 * dp_print_soc_rx_stats(): Print SOC level Rx stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
static inline void
dp_print_soc_rx_stats(struct dp_soc *soc)
{
        uint32_t i;
        char reo_error[DP_REO_ERR_LENGTH];
        char rxdma_error[DP_RXDMA_ERR_LENGTH];
        uint8_t index = 0;

        DP_PRINT_STATS("SOC Rx Stats:\n");
        DP_PRINT_STATS("Fragmented packets: %u",
                       soc->stats.rx.rx_frags);
        DP_PRINT_STATS("Reo reinjected packets: %u",
                       soc->stats.rx.reo_reinject);
        DP_PRINT_STATS("Errors:\n");
        DP_PRINT_STATS("Rx Decrypt Errors = %d",
                       (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
                        soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
        DP_PRINT_STATS("Invalid RBM = %d",
                       soc->stats.rx.err.invalid_rbm);
        DP_PRINT_STATS("Invalid Vdev = %d",
                       soc->stats.rx.err.invalid_vdev);
        DP_PRINT_STATS("Invalid Pdev = %d",
                       soc->stats.rx.err.invalid_pdev);
        DP_PRINT_STATS("Invalid Peer = %d",
                       soc->stats.rx.err.rx_invalid_peer.num);
        DP_PRINT_STATS("HAL Ring Access Fail = %d",
                       soc->stats.rx.err.hal_ring_access_fail);
        DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
        DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);

        for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
                index += qdf_snprint(&rxdma_error[index],
                                     DP_RXDMA_ERR_LENGTH - index,
                                     " %d", soc->stats.rx.err.rxdma_error[i]);
        }
        DP_PRINT_STATS("RXDMA Error (0-31):%s", rxdma_error);

        index = 0;
        for (i = 0; i < HAL_REO_ERR_MAX; i++) {
                index += qdf_snprint(&reo_error[index],
                                     DP_REO_ERR_LENGTH - index,
                                     " %d", soc->stats.rx.err.reo_error[i]);
        }
        DP_PRINT_STATS("REO Error(0-14):%s", reo_error);
}
/**
 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
 * @ring_type: Ring
 *
 * Return: char const pointer
 */
static inline const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
{
        switch (ring_type) {
        case REO_DST:
                return "Reo_dst";
        case REO_EXCEPTION:
                return "Reo_exception";
        case REO_CMD:
                return "Reo_cmd";
        case REO_REINJECT:
                return "Reo_reinject";
        case REO_STATUS:
                return "Reo_status";
        case WBM2SW_RELEASE:
                return "wbm2sw_release";
        case TCL_DATA:
                return "tcl_data";
        case TCL_CMD:
                return "tcl_cmd";
        case TCL_STATUS:
                return "tcl_status";
        case SW2WBM_RELEASE:
                return "sw2wbm_release";
        case RXDMA_BUF:
                return "Rxdma_buf";
        case RXDMA_DST:
                return "Rxdma_dst";
        case RXDMA_MONITOR_BUF:
                return "Rxdma_monitor_buf";
        case RXDMA_MONITOR_DESC:
                return "Rxdma_monitor_desc";
        case RXDMA_MONITOR_STATUS:
                return "Rxdma_monitor_status";
        default:
                dp_err("Invalid ring type");
                break;
        }
        return "Invalid";
}
/**
 * dp_print_ring_stat_from_hal(): Print hal level ring stats
 * @soc: DP_SOC handle
 * @srng: DP_SRNG handle
 * @ring_type: srng src/dst ring
 *
 * Return: void
 */
static void
dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
                            enum hal_ring_type ring_type)
{
        uint32_t tailp;
        uint32_t headp;
        int32_t hw_headp = -1;
        int32_t hw_tailp = -1;
        const char *ring_name;
        struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
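        /* Print both the SW-tracked and the HW-reported head/tail pointers,
         * but only when the SoC, SRNG and underlying hal_srng are valid.
         */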
        if (soc && srng && srng->hal_srng) {
                ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
                hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
                DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
                               ring_name, headp, tailp);

                hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
                                &hw_tailp, ring_type);
                DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
                               ring_name, hw_headp, hw_tailp);
        }
}
/**
 * dp_print_mon_ring_stat_from_hal() - Print stat for monitor rings based
 *                                     on target
 * @pdev: physical device handle
 * @mac_id: mac id
 *
 * Return: void
 */
static inline
void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
{
        if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
                dp_print_ring_stat_from_hal(pdev->soc,
                                            &pdev->rxdma_mon_buf_ring[mac_id],
                                            RXDMA_MONITOR_BUF);
                dp_print_ring_stat_from_hal(pdev->soc,
                                            &pdev->rxdma_mon_dst_ring[mac_id],
                                            RXDMA_MONITOR_DST);
                dp_print_ring_stat_from_hal(pdev->soc,
                                            &pdev->rxdma_mon_desc_ring[mac_id],
                                            RXDMA_MONITOR_DESC);
        }
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->rxdma_mon_status_ring[mac_id],
                                    RXDMA_MONITOR_STATUS);
}
/**
 * dp_print_ring_stats(): Print tail and head pointer
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
static inline void
dp_print_ring_stats(struct dp_pdev *pdev)
{
        uint32_t i;
        int mac_id;

        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->soc->reo_exception_ring,
                                    REO_EXCEPTION);
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->soc->reo_reinject_ring,
                                    REO_REINJECT);
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->soc->reo_cmd_ring,
                                    REO_CMD);
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->soc->reo_status_ring,
                                    REO_STATUS);
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->soc->rx_rel_ring,
                                    WBM2SW_RELEASE);
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->soc->tcl_cmd_ring,
                                    TCL_CMD);
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->soc->tcl_status_ring,
                                    TCL_STATUS);
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->soc->wbm_desc_rel_ring,
                                    SW2WBM_RELEASE);
        for (i = 0; i < MAX_REO_DEST_RINGS; i++)
                dp_print_ring_stat_from_hal(pdev->soc,
                                            &pdev->soc->reo_dest_ring[i],
                                            REO_DST);

        for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
                dp_print_ring_stat_from_hal(pdev->soc,
                                            &pdev->soc->tcl_data_ring[i],
                                            TCL_DATA);

        for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
                dp_print_ring_stat_from_hal(pdev->soc,
                                            &pdev->soc->tx_comp_ring[i],
                                            WBM2SW_RELEASE);

        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->rx_refill_buf_ring,
                                    RXDMA_BUF);
        dp_print_ring_stat_from_hal(pdev->soc,
                                    &pdev->rx_refill_buf_ring2,
                                    RXDMA_BUF);

        for (i = 0; i < MAX_RX_MAC_RINGS; i++)
                dp_print_ring_stat_from_hal(pdev->soc,
                                            &pdev->rx_mac_buf_ring[i],
                                            RXDMA_BUF);

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
                dp_print_mon_ring_stat_from_hal(pdev, mac_id);

        for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
                dp_print_ring_stat_from_hal(pdev->soc,
                                            &pdev->rxdma_err_dst_ring[i],
                                            RXDMA_DST);
}
/**
 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
static inline void
dp_txrx_host_stats_clr(struct dp_vdev *vdev)
{
        struct dp_peer *peer = NULL;

        DP_STATS_CLR(vdev->pdev);
        DP_STATS_CLR(vdev->pdev->soc);
        DP_STATS_CLR(vdev);

        TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
                if (!peer)
                        return;
                DP_STATS_CLR(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
                dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
                                     &peer->stats, peer->peer_ids[0],
                                     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
#endif
        }

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
        dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
                             &vdev->stats, vdev->vdev_id,
                             UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
}
/**
 * dp_print_common_rates_info(): Print common rate for tx or rx
 * @pkt_type_array: rate type array contains rate info
 *
 * Return: void
 */
static inline void
dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
{
        uint8_t mcs, pkt_type;

        for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
                for (mcs = 0; mcs < MAX_MCS; mcs++) {
                        if (!dp_rate_string[pkt_type][mcs].valid)
                                continue;

                        DP_PRINT_STATS(" %s = %d",
                                       dp_rate_string[pkt_type][mcs].mcs_type,
                                       pkt_type_array[pkt_type].mcs_count[mcs]);
                }

                DP_PRINT_STATS("\n");
        }
}
/**
 * dp_print_rx_rates(): Print Rx rate stats
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
static inline void
dp_print_rx_rates(struct dp_vdev *vdev)
{
        struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
        uint8_t i;
        uint8_t index = 0;
        char nss[DP_NSS_LENGTH];

        DP_PRINT_STATS("Rx Rate Info:\n");
        dp_print_common_rates_info(pdev->stats.rx.pkt_type);

        index = 0;
        for (i = 0; i < SS_COUNT; i++) {
                index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
                                     " %d", pdev->stats.rx.nss[i]);
        }
        DP_PRINT_STATS("NSS(1-8) = %s", nss);

        DP_PRINT_STATS("SGI ="
                       " 0.8us %d,"
                       " 0.4us %d,"
                       " 1.6us %d,"
                       " 3.2us %d,",
                       pdev->stats.rx.sgi_count[0],
                       pdev->stats.rx.sgi_count[1],
                       pdev->stats.rx.sgi_count[2],
                       pdev->stats.rx.sgi_count[3]);
        DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
                       pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
                       pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
        DP_PRINT_STATS("Reception Type ="
                       " SU: %d,"
                       " MU_MIMO:%d,"
                       " MU_OFDMA:%d,"
                       " MU_OFDMA_MIMO:%d\n",
                       pdev->stats.rx.reception_type[0],
                       pdev->stats.rx.reception_type[1],
                       pdev->stats.rx.reception_type[2],
                       pdev->stats.rx.reception_type[3]);
        DP_PRINT_STATS("Aggregation:\n");
        DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
                       pdev->stats.rx.ampdu_cnt);
        DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation: %d",
                       pdev->stats.rx.non_ampdu_cnt);
        DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
                       pdev->stats.rx.amsdu_cnt);
        DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
                       pdev->stats.rx.non_amsdu_cnt);
}
/**
 * dp_print_tx_rates(): Print tx rates
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
static inline void
dp_print_tx_rates(struct dp_vdev *vdev)
{
        struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
        uint8_t index;
        char nss[DP_NSS_LENGTH];
        int nss_index;

        DP_PRINT_STATS("Tx Rate Info:\n");
        dp_print_common_rates_info(pdev->stats.tx.pkt_type);

        DP_PRINT_STATS("SGI ="
                       " 0.8us %d"
                       " 0.4us %d"
                       " 1.6us %d"
                       " 3.2us %d",
                       pdev->stats.tx.sgi_count[0],
                       pdev->stats.tx.sgi_count[1],
                       pdev->stats.tx.sgi_count[2],
                       pdev->stats.tx.sgi_count[3]);
        DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
                       pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
                       pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);

        index = 0;
        for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
                index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
                                     " %d", pdev->stats.tx.nss[nss_index]);
        }
        DP_PRINT_STATS("NSS(1-8) = %s", nss);

        DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
        DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
        DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
        DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
        DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);

        DP_PRINT_STATS("Aggregation:\n");
        DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
                       pdev->stats.tx.amsdu_cnt);
        DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
                       pdev->stats.tx.non_amsdu_cnt);
}
/**
 * dp_print_peer_stats(): print peer stats
 * @peer: DP_PEER handle
 *
 * Return: void
 */
static inline void dp_print_peer_stats(struct dp_peer *peer)
{
        uint8_t i;
        uint32_t index;
        char nss[DP_NSS_LENGTH];

        DP_PRINT_STATS("Node Tx Stats:\n");
        DP_PRINT_STATS("Total Packet Completions = %d",
                       peer->stats.tx.comp_pkt.num);
        DP_PRINT_STATS("Total Bytes Completions = %llu",
                       peer->stats.tx.comp_pkt.bytes);
        DP_PRINT_STATS("Success Packets = %d",
                       peer->stats.tx.tx_success.num);
        DP_PRINT_STATS("Success Bytes = %llu",
                       peer->stats.tx.tx_success.bytes);
        DP_PRINT_STATS("Unicast Success Packets = %d",
                       peer->stats.tx.ucast.num);
        DP_PRINT_STATS("Unicast Success Bytes = %llu",
                       peer->stats.tx.ucast.bytes);
        DP_PRINT_STATS("Multicast Success Packets = %d",
                       peer->stats.tx.mcast.num);
        DP_PRINT_STATS("Multicast Success Bytes = %llu",
                       peer->stats.tx.mcast.bytes);
        DP_PRINT_STATS("Broadcast Success Packets = %d",
                       peer->stats.tx.bcast.num);
        DP_PRINT_STATS("Broadcast Success Bytes = %llu",
                       peer->stats.tx.bcast.bytes);
        DP_PRINT_STATS("Packets Failed = %d",
                       peer->stats.tx.tx_failed);
        DP_PRINT_STATS("Packets In OFDMA = %d",
                       peer->stats.tx.ofdma);
        DP_PRINT_STATS("Packets In STBC = %d",
                       peer->stats.tx.stbc);
        DP_PRINT_STATS("Packets In LDPC = %d",
                       peer->stats.tx.ldpc);
        DP_PRINT_STATS("Packet Retries = %d",
                       peer->stats.tx.retries);
        DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
                       peer->stats.tx.amsdu_cnt);
        DP_PRINT_STATS("Last Packet RSSI = %d",
                       peer->stats.tx.last_ack_rssi);
        DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
                       peer->stats.tx.dropped.fw_rem.num);
        DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
                       peer->stats.tx.dropped.fw_rem.bytes);
        DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
                       peer->stats.tx.dropped.fw_rem_tx);
        DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
                       peer->stats.tx.dropped.fw_rem_notx);
        DP_PRINT_STATS("Dropped : Age Out = %d",
                       peer->stats.tx.dropped.age_out);
        DP_PRINT_STATS("NAWDS : ");
        DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
                       peer->stats.tx.nawds_mcast_drop);
        DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
                       peer->stats.tx.nawds_mcast.num);
        DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
                       peer->stats.tx.nawds_mcast.bytes);

        DP_PRINT_STATS("Rate Info:");
        dp_print_common_rates_info(peer->stats.tx.pkt_type);

        DP_PRINT_STATS("SGI = "
                       " 0.8us %d"
                       " 0.4us %d"
                       " 1.6us %d"
                       " 3.2us %d",
                       peer->stats.tx.sgi_count[0],
                       peer->stats.tx.sgi_count[1],
                       peer->stats.tx.sgi_count[2],
                       peer->stats.tx.sgi_count[3]);
        DP_PRINT_STATS("Excess Retries per AC ");
        DP_PRINT_STATS(" Best effort = %d",
                       peer->stats.tx.excess_retries_per_ac[0]);
        DP_PRINT_STATS(" Background = %d",
                       peer->stats.tx.excess_retries_per_ac[1]);
        DP_PRINT_STATS(" Video = %d",
                       peer->stats.tx.excess_retries_per_ac[2]);
        DP_PRINT_STATS(" Voice = %d",
                       peer->stats.tx.excess_retries_per_ac[3]);
        DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
                       peer->stats.tx.bw[0], peer->stats.tx.bw[1],
                       peer->stats.tx.bw[2], peer->stats.tx.bw[3]);

        index = 0;
        for (i = 0; i < SS_COUNT; i++) {
                index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
                                     " %d", peer->stats.tx.nss[i]);
        }
        DP_PRINT_STATS("NSS(1-8) = %s", nss);

        DP_PRINT_STATS("Aggregation:");
        DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
                       peer->stats.tx.amsdu_cnt);
        DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
                       peer->stats.tx.non_amsdu_cnt);

        DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
        DP_PRINT_STATS(" Bytes transmitted in last sec: %d",
                       peer->stats.tx.tx_byte_rate);
        DP_PRINT_STATS(" Data transmitted in last sec: %d",
                       peer->stats.tx.tx_data_rate);

        DP_PRINT_STATS("Node Rx Stats:");
        DP_PRINT_STATS("Packets Sent To Stack = %d",
                       peer->stats.rx.to_stack.num);
        DP_PRINT_STATS("Bytes Sent To Stack = %llu",
                       peer->stats.rx.to_stack.bytes);
        for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
                DP_PRINT_STATS("Ring Id = %d", i);
                DP_PRINT_STATS(" Packets Received = %d",
                               peer->stats.rx.rcvd_reo[i].num);
                DP_PRINT_STATS(" Bytes Received = %llu",
                               peer->stats.rx.rcvd_reo[i].bytes);
        }
        DP_PRINT_STATS("Multicast Packets Received = %d",
                       peer->stats.rx.multicast.num);
        DP_PRINT_STATS("Multicast Bytes Received = %llu",
                       peer->stats.rx.multicast.bytes);
        DP_PRINT_STATS("Broadcast Packets Received = %d",
                       peer->stats.rx.bcast.num);
        DP_PRINT_STATS("Broadcast Bytes Received = %llu",
                       peer->stats.rx.bcast.bytes);
        DP_PRINT_STATS("Intra BSS Packets Received = %d",
                       peer->stats.rx.intra_bss.pkts.num);
        DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
                       peer->stats.rx.intra_bss.pkts.bytes);
        DP_PRINT_STATS("Raw Packets Received = %d",
                       peer->stats.rx.raw.num);
        DP_PRINT_STATS("Raw Bytes Received = %llu",
                       peer->stats.rx.raw.bytes);
        DP_PRINT_STATS("Errors: MIC Errors = %d",
                       peer->stats.rx.err.mic_err);
        DP_PRINT_STATS("Errors: Decryption Errors = %d",
                       peer->stats.rx.err.decrypt_err);
        DP_PRINT_STATS("Msdu's Received Not Part of Ampdu = %d",
                       peer->stats.rx.non_ampdu_cnt);
        DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
                       peer->stats.rx.ampdu_cnt);
        DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
                       peer->stats.rx.non_amsdu_cnt);
        DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
                       peer->stats.rx.amsdu_cnt);
        DP_PRINT_STATS("NAWDS : ");
        DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
                       peer->stats.rx.nawds_mcast_drop);
        DP_PRINT_STATS("SGI ="
                       " 0.8us %d"
                       " 0.4us %d"
                       " 1.6us %d"
                       " 3.2us %d",
                       peer->stats.rx.sgi_count[0],
                       peer->stats.rx.sgi_count[1],
                       peer->stats.rx.sgi_count[2],
                       peer->stats.rx.sgi_count[3]);
        DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
                       peer->stats.rx.bw[0], peer->stats.rx.bw[1],
                       peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
        DP_PRINT_STATS("Reception Type ="
                       " SU %d,"
                       " MU_MIMO %d,"
                       " MU_OFDMA %d,"
                       " MU_OFDMA_MIMO %d",
                       peer->stats.rx.reception_type[0],
                       peer->stats.rx.reception_type[1],
                       peer->stats.rx.reception_type[2],
                       peer->stats.rx.reception_type[3]);

        dp_print_common_rates_info(peer->stats.rx.pkt_type);

        index = 0;
        for (i = 0; i < SS_COUNT; i++) {
                index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
                                     " %d", peer->stats.rx.nss[i]);
        }
        DP_PRINT_STATS("NSS(1-8) = %s", nss);

        DP_PRINT_STATS("Aggregation:");
        DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
                       peer->stats.rx.ampdu_cnt);
        DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
                       peer->stats.rx.non_ampdu_cnt);
        DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
                       peer->stats.rx.amsdu_cnt);
        DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
                       peer->stats.rx.non_amsdu_cnt);

        DP_PRINT_STATS("Bytes and Packets received in last one sec:");
        DP_PRINT_STATS(" Bytes received in last sec: %d",
                       peer->stats.rx.rx_byte_rate);
        DP_PRINT_STATS(" Data received in last sec: %d",
                       peer->stats.rx.rx_data_rate);
}
/*
 * dp_get_host_peer_stats() - function to print peer stats
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 *
 * Return: void
 */
static void
dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
{
        struct dp_peer *peer;
        uint8_t local_id;

        if (!mac_addr) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "Invalid MAC address\n");
                return;
        }

        peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
                                                      &local_id);

        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: Invalid peer\n", __func__);
                return;
        }

        dp_print_peer_stats(peer);
        dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
}
/**
 * dp_print_soc_cfg_params() - Dump soc wlan config parameters
 * @soc: DP soc handle
 *
 * Return: void
 */
static void
dp_print_soc_cfg_params(struct dp_soc *soc)
{
        struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
        uint8_t index = 0, i = 0;
        char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
        int num_of_int_contexts;

        if (!soc) {
                dp_err("Context is null");
                return;
        }

        soc_cfg_ctx = soc->wlan_cfg_ctx;

        if (!soc_cfg_ctx) {
                dp_err("Context is null");
                return;
        }

        num_of_int_contexts =
                wlan_cfg_get_num_contexts(soc_cfg_ctx);

        DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
                       soc_cfg_ctx->num_int_ctxts);
        DP_TRACE_STATS(DEBUG, "Max clients: %u",
                       soc_cfg_ctx->max_clients);
        DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
                       soc_cfg_ctx->max_alloc_size);
        DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
                       soc_cfg_ctx->per_pdev_tx_ring);
        DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
                       soc_cfg_ctx->num_tcl_data_rings);
        DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
                       soc_cfg_ctx->per_pdev_rx_ring);
        DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
                       soc_cfg_ctx->per_pdev_lmac_ring);
        DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
                       soc_cfg_ctx->num_reo_dest_rings);
        DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
                       soc_cfg_ctx->num_tx_desc_pool);
        DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
                       soc_cfg_ctx->num_tx_ext_desc_pool);
        DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
                       soc_cfg_ctx->num_tx_desc);
        DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
                       soc_cfg_ctx->num_tx_ext_desc);
        DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
                       soc_cfg_ctx->htt_packet_type);
        DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
                       soc_cfg_ctx->max_peer_id);
        DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
                       soc_cfg_ctx->tx_ring_size);
        DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
                       soc_cfg_ctx->tx_comp_ring_size);
        DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
                       soc_cfg_ctx->tx_comp_ring_size_nss);
        DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
                       soc_cfg_ctx->int_batch_threshold_tx);
        DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
                       soc_cfg_ctx->int_timer_threshold_tx);
        DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
                       soc_cfg_ctx->int_batch_threshold_rx);
        DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
                       soc_cfg_ctx->int_timer_threshold_rx);
        DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
                       soc_cfg_ctx->int_batch_threshold_other);
        DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
                       soc_cfg_ctx->int_timer_threshold_other);
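
        /* For each ring group, build a per-interrupt-context mask string
         * (one value per context) and print it on a single line.
         */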
        for (i = 0; i < num_of_int_contexts; i++) {
                index += qdf_snprint(&ring_mask[index],
                                     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
                                     " %d",
                                     soc_cfg_ctx->int_tx_ring_mask[i]);
        }
        DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
                       num_of_int_contexts, ring_mask);

        index = 0;
        for (i = 0; i < num_of_int_contexts; i++) {
                index += qdf_snprint(&ring_mask[index],
                                     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
                                     " %d",
                                     soc_cfg_ctx->int_rx_ring_mask[i]);
        }
        DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
                       num_of_int_contexts, ring_mask);

        index = 0;
        for (i = 0; i < num_of_int_contexts; i++) {
                index += qdf_snprint(&ring_mask[index],
                                     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
                                     " %d",
                                     soc_cfg_ctx->int_rx_mon_ring_mask[i]);
        }
        DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
                       num_of_int_contexts, ring_mask);

        index = 0;
        for (i = 0; i < num_of_int_contexts; i++) {
                index += qdf_snprint(&ring_mask[index],
                                     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
                                     " %d",
                                     soc_cfg_ctx->int_rx_err_ring_mask[i]);
        }
        DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
                       num_of_int_contexts, ring_mask);

        index = 0;
        for (i = 0; i < num_of_int_contexts; i++) {
                index += qdf_snprint(&ring_mask[index],
                                     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
                                     " %d",
                                     soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
        }
        DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
                       num_of_int_contexts, ring_mask);

        index = 0;
        for (i = 0; i < num_of_int_contexts; i++) {
                index += qdf_snprint(&ring_mask[index],
                                     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
                                     " %d",
                                     soc_cfg_ctx->int_reo_status_ring_mask[i]);
        }
        DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
                       num_of_int_contexts, ring_mask);

        index = 0;
        for (i = 0; i < num_of_int_contexts; i++) {
                index += qdf_snprint(&ring_mask[index],
                                     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
                                     " %d",
                                     soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
        }
        DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
                       num_of_int_contexts, ring_mask);

        index = 0;
        for (i = 0; i < num_of_int_contexts; i++) {
                index += qdf_snprint(&ring_mask[index],
                                     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
                                     " %d",
                                     soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
        }
        DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
                       num_of_int_contexts, ring_mask);

        DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
                       soc_cfg_ctx->rx_hash);
        DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
                       soc_cfg_ctx->tso_enabled);
        DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
                       soc_cfg_ctx->lro_enabled);
        DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
                       soc_cfg_ctx->sg_enabled);
        DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
                       soc_cfg_ctx->gro_enabled);
        DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
                       soc_cfg_ctx->rawmode_enabled);
        DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
                       soc_cfg_ctx->peer_flow_ctrl_enabled);
        DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
                       soc_cfg_ctx->napi_enabled);
        DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
                       soc_cfg_ctx->tcp_udp_checksumoffload);
        DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
                       soc_cfg_ctx->defrag_timeout_check);
        DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
                       soc_cfg_ctx->rx_defrag_min_timeout);
        DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
                       soc_cfg_ctx->wbm_release_ring);
        DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
                       soc_cfg_ctx->tcl_cmd_ring);
        DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
                       soc_cfg_ctx->tcl_status_ring);
        DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
                       soc_cfg_ctx->reo_reinject_ring);
        DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
                       soc_cfg_ctx->rx_release_ring);
        DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
                       soc_cfg_ctx->reo_exception_ring);
        DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
                       soc_cfg_ctx->reo_cmd_ring);
        DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
                       soc_cfg_ctx->reo_status_ring);
        DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
                       soc_cfg_ctx->rxdma_refill_ring);
        DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
                       soc_cfg_ctx->rxdma_err_dst_ring);
}
/**
 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static void
dp_print_pdev_cfg_params(struct dp_pdev *pdev)
{
        struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

        if (!pdev) {
                dp_err("Context is null");
                return;
        }

        pdev_cfg_ctx = pdev->wlan_cfg_ctx;

        if (!pdev_cfg_ctx) {
                dp_err("Context is null");
                return;
        }

        DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
                       pdev_cfg_ctx->rx_dma_buf_ring_size);
        DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
                       pdev_cfg_ctx->dma_mon_buf_ring_size);
        DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
                       pdev_cfg_ctx->dma_mon_dest_ring_size);
        DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
                       pdev_cfg_ctx->dma_mon_status_ring_size);
        DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
                       pdev_cfg_ctx->rxdma_monitor_desc_ring);
        DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
                       pdev_cfg_ctx->num_mac_rings);
}
/**
 * dp_txrx_stats_help() - Helper function for Txrx_Stats
 *
 * Return: None
 */
static void dp_txrx_stats_help(void)
{
        dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
        dp_info("stats_option:");
        dp_info(" 1 -- HTT Tx Statistics");
        dp_info(" 2 -- HTT Rx Statistics");
        dp_info(" 3 -- HTT Tx HW Queue Statistics");
        dp_info(" 4 -- HTT Tx HW Sched Statistics");
        dp_info(" 5 -- HTT Error Statistics");
        dp_info(" 6 -- HTT TQM Statistics");
        dp_info(" 7 -- HTT TQM CMDQ Statistics");
        dp_info(" 8 -- HTT TX_DE_CMN Statistics");
        dp_info(" 9 -- HTT Tx Rate Statistics");
        dp_info(" 10 -- HTT Rx Rate Statistics");
        dp_info(" 11 -- HTT Peer Statistics");
        dp_info(" 12 -- HTT Tx SelfGen Statistics");
        dp_info(" 13 -- HTT Tx MU HWQ Statistics");
        dp_info(" 14 -- HTT RING_IF_INFO Statistics");
        dp_info(" 15 -- HTT SRNG Statistics");
        dp_info(" 16 -- HTT SFM Info Statistics");
        dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
        dp_info(" 18 -- HTT Peer List Details");
        dp_info(" 20 -- Clear Host Statistics");
        dp_info(" 21 -- Host Rx Rate Statistics");
        dp_info(" 22 -- Host Tx Rate Statistics");
        dp_info(" 23 -- Host Tx Statistics");
        dp_info(" 24 -- Host Rx Statistics");
        dp_info(" 25 -- Host AST Statistics");
        dp_info(" 26 -- Host SRNG PTR Statistics");
        dp_info(" 27 -- Host Mon Statistics");
        dp_info(" 28 -- Host REO Queue Statistics");
        dp_info(" 29 -- Host Soc cfg param Statistics");
        dp_info(" 30 -- Host pdev cfg param Statistics");
}
/**
 * dp_print_host_stats() - Function to print the stats aggregated at host
 * @vdev_handle: DP_VDEV handle
 * @req: host stats request, carrying the stats type and peer address
 *
 * Return: 0 on success, print error message in case of failure
 */
static int
dp_print_host_stats(struct cdp_vdev *vdev_handle,
                    struct cdp_txrx_stats_req *req)
{
        struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
        struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
        enum cdp_host_txrx_stats type =
                        dp_stats_mapping_table[req->stats][STATS_HOST];

        dp_aggregate_pdev_stats(pdev);

        switch (type) {
        case TXRX_CLEAR_STATS:
                dp_txrx_host_stats_clr(vdev);
                break;
        case TXRX_RX_RATE_STATS:
                dp_print_rx_rates(vdev);
                break;
        case TXRX_TX_RATE_STATS:
                dp_print_tx_rates(vdev);
                break;
        case TXRX_TX_HOST_STATS:
                dp_print_pdev_tx_stats(pdev);
                dp_print_soc_tx_stats(pdev->soc);
                break;
        case TXRX_RX_HOST_STATS:
                dp_print_pdev_rx_stats(pdev);
                dp_print_soc_rx_stats(pdev->soc);
                break;
        case TXRX_AST_STATS:
                dp_print_ast_stats(pdev->soc);
                dp_print_peer_table(vdev);
                break;
        case TXRX_SRNG_PTR_STATS:
                dp_print_ring_stats(pdev);
                break;
        case TXRX_RX_MON_STATS:
                dp_print_pdev_rx_mon_stats(pdev);
                break;
        case TXRX_REO_QUEUE_STATS:
                dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
                break;
        case TXRX_SOC_CFG_PARAMS:
                dp_print_soc_cfg_params(pdev->soc);
                break;
        case TXRX_PDEV_CFG_PARAMS:
                dp_print_pdev_cfg_params(pdev);
                break;
        default:
                dp_info("Wrong Input For TxRx Host Stats");
                dp_txrx_stats_help();
                break;
        }
        return 0;
}
/*
 * dp_ppdu_ring_reset() - Reset PPDU Stats ring
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
static void
dp_ppdu_ring_reset(struct dp_pdev *pdev)
{
        struct htt_rx_ring_tlv_filter htt_tlv_filter;
        int mac_id;

        qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
                                                          pdev->pdev_id);

                htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
                        pdev->rxdma_mon_status_ring[mac_id].hal_srng,
                        RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
        }
}
/*
 * dp_ppdu_ring_cfg() - Configure PPDU Stats ring
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
static void
dp_ppdu_ring_cfg(struct dp_pdev *pdev)
{
        struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
        int mac_id;
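        /* Subscribe to the PPDU-level status TLVs (ppdu_start, ppdu_end and
         * per-user stats) on the monitor status ring; per-packet TLVs stay
         * disabled unless NAC monitoring or M-copy mode needs packet headers.
         */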
        htt_tlv_filter.mpdu_start = 1;
        htt_tlv_filter.msdu_start = 0;
        htt_tlv_filter.packet = 0;
        htt_tlv_filter.msdu_end = 0;
        htt_tlv_filter.mpdu_end = 0;
        htt_tlv_filter.attention = 0;
        htt_tlv_filter.ppdu_start = 1;
        htt_tlv_filter.ppdu_end = 1;
        htt_tlv_filter.ppdu_end_user_stats = 1;
        htt_tlv_filter.ppdu_end_user_stats_ext = 1;
        htt_tlv_filter.ppdu_end_status_done = 1;
        htt_tlv_filter.enable_fp = 1;
        htt_tlv_filter.enable_md = 0;

        if (pdev->neighbour_peers_added &&
            pdev->soc->hw_nac_monitor_support) {
                htt_tlv_filter.enable_md = 1;
                htt_tlv_filter.packet_header = 1;
        }

        if (pdev->mcopy_mode) {
                htt_tlv_filter.packet_header = 1;
                htt_tlv_filter.enable_mo = 1;
        }

        htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
        htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
        htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
        htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
        htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
        htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

        if (pdev->neighbour_peers_added &&
            pdev->soc->hw_nac_monitor_support)
                htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
                                                          pdev->pdev_id);

                htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
                        pdev->rxdma_mon_status_ring[mac_id].hal_srng,
                        RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
        }
}
/*
 * is_ppdu_txrx_capture_enabled() - check whether pktlog, tx sniffer and
 * mcopy (debug sniffer) modes are all inactive
 * @pdev: dp pdev handle
 *
 * Return: true when neither pktlog PPDU stats, tx sniffer nor mcopy mode
 * is enabled, false otherwise
 */
static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
{
        if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
            !pdev->mcopy_mode)
                return true;
        else
                return false;
}
/*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev_handle: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: void
 */
static void
dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
{
        struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
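        /* Pick the HTT PPDU stats configuration that matches whichever of
         * BPR, enhanced stats and pktlog PPDU stats are currently active.
         */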
        switch (val) {
        case CDP_BPR_DISABLE:
                pdev->bpr_enable = CDP_BPR_DISABLE;
                if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
                    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
                        dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
                } else if (pdev->enhanced_stats_en &&
                           !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
                           !pdev->pktlog_ppdu_stats) {
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_ENH_STATS,
                                                  pdev->pdev_id);
                }
                break;
        case CDP_BPR_ENABLE:
                pdev->bpr_enable = CDP_BPR_ENABLE;
                if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
                    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_BPR,
                                                  pdev->pdev_id);
                } else if (pdev->enhanced_stats_en &&
                           !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
                           !pdev->pktlog_ppdu_stats) {
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_BPR_ENH,
                                                  pdev->pdev_id);
                } else if (pdev->pktlog_ppdu_stats) {
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_BPR_PKTLOG,
                                                  pdev->pdev_id);
                }
                break;
        default:
                break;
        }
}
  6543. /*
  6544. * dp_config_debug_sniffer()- API to enable/disable debug sniffer
  6545. * @pdev_handle: DP_PDEV handle
  6546. * @val: user provided value
  6547. *
  6548. * Return: void
  6549. */
  6550. static void
  6551. dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
  6552. {
  6553. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6554. switch (val) {
  6555. case 0:
  6556. pdev->tx_sniffer_enable = 0;
  6557. pdev->mcopy_mode = 0;
  6558. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6559. !pdev->bpr_enable) {
  6560. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6561. dp_ppdu_ring_reset(pdev);
  6562. } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
  6563. dp_h2t_cfg_stats_msg_send(pdev,
  6564. DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6565. } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
  6566. dp_h2t_cfg_stats_msg_send(pdev,
  6567. DP_PPDU_STATS_CFG_BPR_ENH,
  6568. pdev->pdev_id);
  6569. } else {
  6570. dp_h2t_cfg_stats_msg_send(pdev,
  6571. DP_PPDU_STATS_CFG_BPR,
  6572. pdev->pdev_id);
  6573. }
  6574. break;
  6575. case 1:
  6576. pdev->tx_sniffer_enable = 1;
  6577. pdev->mcopy_mode = 0;
  6578. if (!pdev->pktlog_ppdu_stats)
  6579. dp_h2t_cfg_stats_msg_send(pdev,
  6580. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6581. break;
  6582. case 2:
  6583. pdev->mcopy_mode = 1;
  6584. pdev->tx_sniffer_enable = 0;
  6585. dp_ppdu_ring_cfg(pdev);
  6586. if (!pdev->pktlog_ppdu_stats)
  6587. dp_h2t_cfg_stats_msg_send(pdev,
  6588. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6589. break;
  6590. default:
  6591. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6592. "Invalid value");
  6593. break;
  6594. }
  6595. }
  6596. /*
6597. * dp_enable_enhanced_stats() - API to enable enhanced statistics
  6598. * @pdev_handle: DP_PDEV handle
  6599. *
  6600. * Return: void
  6601. */
  6602. static void
  6603. dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6604. {
  6605. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6606. if (pdev->enhanced_stats_en == 0)
  6607. dp_cal_client_timer_start(pdev->cal_client_ctx);
  6608. pdev->enhanced_stats_en = 1;
  6609. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6610. !pdev->monitor_vdev)
  6611. dp_ppdu_ring_cfg(pdev);
  6612. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6613. dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6614. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6615. dp_h2t_cfg_stats_msg_send(pdev,
  6616. DP_PPDU_STATS_CFG_BPR_ENH,
  6617. pdev->pdev_id);
  6618. }
  6619. }
  6620. /*
6621. * dp_disable_enhanced_stats() - API to disable enhanced statistics
  6622. * @pdev_handle: DP_PDEV handle
  6623. *
  6624. * Return: void
  6625. */
  6626. static void
  6627. dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6628. {
  6629. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6630. if (pdev->enhanced_stats_en == 1)
  6631. dp_cal_client_timer_stop(pdev->cal_client_ctx);
  6632. pdev->enhanced_stats_en = 0;
  6633. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6634. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6635. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6636. dp_h2t_cfg_stats_msg_send(pdev,
  6637. DP_PPDU_STATS_CFG_BPR,
  6638. pdev->pdev_id);
  6639. }
  6640. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6641. !pdev->monitor_vdev)
  6642. dp_ppdu_ring_reset(pdev);
  6643. }
  6644. /*
  6645. * dp_get_fw_peer_stats()- function to print peer stats
  6646. * @pdev_handle: DP_PDEV handle
  6647. * @mac_addr: mac address of the peer
  6648. * @cap: Type of htt stats requested
  6649. *
6650. * Currently supports only MAC-ID-based requests. Supported @cap values:
  6651. * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
  6652. * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
  6653. * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
  6654. *
  6655. * Return: void
  6656. */
  6657. static void
  6658. dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
  6659. uint32_t cap)
  6660. {
  6661. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6662. int i;
  6663. uint32_t config_param0 = 0;
  6664. uint32_t config_param1 = 0;
  6665. uint32_t config_param2 = 0;
  6666. uint32_t config_param3 = 0;
  6667. HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
  6668. config_param0 |= (1 << (cap + 1));
  6669. for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
  6670. config_param1 |= (1 << i);
  6671. }
  6672. config_param2 |= (mac_addr[0] & 0x000000ff);
  6673. config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
  6674. config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
  6675. config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
  6676. config_param3 |= (mac_addr[4] & 0x000000ff);
  6677. config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
  6678. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6679. config_param0, config_param1, config_param2,
  6680. config_param3, 0, 0, 0);
  6681. }
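/*
 * Illustrative usage sketch (not part of the driver): the MAC address is
 * packed into config_param2/config_param3 above, with bytes 0-3 in
 * config_param2 and bytes 4-5 in config_param3. A hypothetical caller
 * requesting TQM-query based peer stats could look like:
 *
 *   uint8_t mac[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
 *   uint32_t cap = 2;   (HTT_PEER_STATS_REQ_MODE_QUERY_TQM per the list above)
 *   dp_get_fw_peer_stats((struct cdp_pdev *)pdev, mac, cap);
 *
 * The MAC value and the numeric cap are example values only.
 */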
6682. /* This struct definition will be removed from here
6683. * once it gets added to the FW headers */
  6684. struct httstats_cmd_req {
  6685. uint32_t config_param0;
  6686. uint32_t config_param1;
  6687. uint32_t config_param2;
  6688. uint32_t config_param3;
  6689. int cookie;
  6690. u_int8_t stats_id;
  6691. };
  6692. /*
6693. * dp_get_htt_stats: function to process the httstats request
  6694. * @pdev_handle: DP pdev handle
  6695. * @data: pointer to request data
  6696. * @data_len: length for request data
  6697. *
  6698. * return: void
  6699. */
  6700. static void
  6701. dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
  6702. {
  6703. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6704. struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
  6705. QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
  6706. dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
  6707. req->config_param0, req->config_param1,
  6708. req->config_param2, req->config_param3,
  6709. req->cookie, 0, 0);
  6710. }
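/*
 * Example (illustrative only): a caller that already knows the HTT stats
 * id could fill the temporary httstats_cmd_req defined above and pass it
 * in. Note that data_len must equal sizeof(struct httstats_cmd_req) or
 * the QDF_ASSERT above fires.
 *
 *   struct httstats_cmd_req req = { 0 };
 *   req.stats_id = HTT_DBG_EXT_STATS_PDEV_TX;   (assumed stats id)
 *   req.config_param1 = 0xFFFFFFFF;             (request all TLVs)
 *   dp_get_htt_stats((struct cdp_pdev *)pdev, &req, sizeof(req));
 */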
  6711. /*
  6712. * dp_set_pdev_param: function to set parameters in pdev
  6713. * @pdev_handle: DP pdev handle
  6714. * @param: parameter type to be set
  6715. * @val: value of parameter to be set
  6716. *
  6717. * return: void
  6718. */
  6719. static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
  6720. enum cdp_pdev_param_type param, uint8_t val)
  6721. {
  6722. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6723. switch (param) {
  6724. case CDP_CONFIG_DEBUG_SNIFFER:
  6725. dp_config_debug_sniffer(pdev_handle, val);
  6726. break;
  6727. case CDP_CONFIG_BPR_ENABLE:
  6728. dp_set_bpr_enable(pdev_handle, val);
  6729. break;
  6730. case CDP_CONFIG_PRIMARY_RADIO:
  6731. pdev->is_primary = val;
  6732. break;
  6733. default:
  6734. break;
  6735. }
  6736. }
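/*
 * Example (illustrative): the BPR and debug-sniffer knobs handled above
 * are normally reached through this cdp op rather than called directly,
 * e.g.
 *
 *   dp_set_pdev_param((struct cdp_pdev *)pdev,
 *                     CDP_CONFIG_BPR_ENABLE, CDP_BPR_ENABLE);
 *   dp_set_pdev_param((struct cdp_pdev *)pdev,
 *                     CDP_CONFIG_DEBUG_SNIFFER, 1);
 *
 * For the sniffer, 0 disables capture, 1 enables the tx sniffer and
 * 2 enables m-copy mode, following the switch in
 * dp_config_debug_sniffer() above.
 */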
  6737. /*
  6738. * dp_get_vdev_param: function to get parameters from vdev
  6739. * @param: parameter type to get value
  6740. *
6741. * return: parameter value
  6742. */
  6743. static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
  6744. enum cdp_vdev_param_type param)
  6745. {
  6746. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6747. uint32_t val;
  6748. switch (param) {
  6749. case CDP_ENABLE_WDS:
  6750. val = vdev->wds_enabled;
  6751. break;
  6752. case CDP_ENABLE_MEC:
  6753. val = vdev->mec_enabled;
  6754. break;
  6755. case CDP_ENABLE_DA_WAR:
  6756. val = vdev->da_war_enabled;
  6757. break;
  6758. default:
  6759. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6760. "param value %d is wrong\n",
  6761. param);
  6762. val = -1;
  6763. break;
  6764. }
  6765. return val;
  6766. }
  6767. /*
  6768. * dp_set_vdev_param: function to set parameters in vdev
  6769. * @param: parameter type to be set
  6770. * @val: value of parameter to be set
  6771. *
  6772. * return: void
  6773. */
  6774. static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
  6775. enum cdp_vdev_param_type param, uint32_t val)
  6776. {
  6777. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6778. switch (param) {
  6779. case CDP_ENABLE_WDS:
  6780. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6781. "wds_enable %d for vdev(%p) id(%d)\n",
  6782. val, vdev, vdev->vdev_id);
  6783. vdev->wds_enabled = val;
  6784. break;
  6785. case CDP_ENABLE_MEC:
  6786. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6787. "mec_enable %d for vdev(%p) id(%d)\n",
  6788. val, vdev, vdev->vdev_id);
  6789. vdev->mec_enabled = val;
  6790. break;
  6791. case CDP_ENABLE_DA_WAR:
  6792. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6793. "da_war_enable %d for vdev(%p) id(%d)\n",
  6794. val, vdev, vdev->vdev_id);
  6795. vdev->da_war_enabled = val;
  6796. break;
  6797. case CDP_ENABLE_NAWDS:
  6798. vdev->nawds_enabled = val;
  6799. break;
  6800. case CDP_ENABLE_MCAST_EN:
  6801. vdev->mcast_enhancement_en = val;
  6802. break;
  6803. case CDP_ENABLE_PROXYSTA:
  6804. vdev->proxysta_vdev = val;
  6805. break;
  6806. case CDP_UPDATE_TDLS_FLAGS:
  6807. vdev->tdls_link_connected = val;
  6808. break;
  6809. case CDP_CFG_WDS_AGING_TIMER:
  6810. if (val == 0)
  6811. qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
  6812. else if (val != vdev->wds_aging_timer_val)
  6813. qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
  6814. vdev->wds_aging_timer_val = val;
  6815. break;
  6816. case CDP_ENABLE_AP_BRIDGE:
  6817. if (wlan_op_mode_sta != vdev->opmode)
  6818. vdev->ap_bridge_enabled = val;
  6819. else
  6820. vdev->ap_bridge_enabled = false;
  6821. break;
  6822. case CDP_ENABLE_CIPHER:
  6823. vdev->sec_type = val;
  6824. break;
  6825. case CDP_ENABLE_QWRAP_ISOLATION:
  6826. vdev->isolation_vdev = val;
  6827. break;
  6828. default:
  6829. break;
  6830. }
  6831. dp_tx_vdev_update_search_flags(vdev);
  6832. }
  6833. /**
  6834. * dp_peer_set_nawds: set nawds bit in peer
  6835. * @peer_handle: pointer to peer
  6836. * @value: enable/disable nawds
  6837. *
  6838. * return: void
  6839. */
  6840. static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
  6841. {
  6842. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6843. peer->nawds_enabled = value;
  6844. }
  6845. /*
  6846. * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
  6847. * @vdev_handle: DP_VDEV handle
6848. * @map_id: ID of map that needs to be updated
  6849. *
  6850. * Return: void
  6851. */
  6852. static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
  6853. uint8_t map_id)
  6854. {
  6855. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6856. vdev->dscp_tid_map_id = map_id;
  6857. return;
  6858. }
  6859. /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
6860. * @pdev_handle: DP pdev handle
  6861. *
  6862. * return : cdp_pdev_stats pointer
  6863. */
  6864. static struct cdp_pdev_stats*
  6865. dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
  6866. {
  6867. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6868. dp_aggregate_pdev_stats(pdev);
  6869. return &pdev->stats;
  6870. }
6871. /* dp_txrx_get_peer_stats - Returns cdp_peer_stats
  6872. * @peer_handle: DP_PEER handle
  6873. *
  6874. * return : cdp_peer_stats pointer
  6875. */
  6876. static struct cdp_peer_stats*
  6877. dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
  6878. {
  6879. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6880. qdf_assert(peer);
  6881. return &peer->stats;
  6882. }
  6883. /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
  6884. * @peer_handle: DP_PEER handle
  6885. *
  6886. * return : void
  6887. */
  6888. static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
  6889. {
  6890. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6891. qdf_assert(peer);
  6892. qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
  6893. }
  6894. /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
  6895. * @vdev_handle: DP_VDEV handle
6896. * @buf: buffer for vdev stats
6897. * @is_aggregate: true to aggregate stats from child peers, false to copy cached vdev stats
  6898. * return : int
  6899. */
  6900. static int dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
  6901. bool is_aggregate)
  6902. {
  6903. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6904. struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
  6905. if (is_aggregate)
  6906. dp_aggregate_vdev_stats(vdev, buf);
  6907. else
  6908. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  6909. return 0;
  6910. }
  6911. /*
6912. * dp_get_total_per(): get the overall packet error rate (PER)
  6913. * @pdev_handle: DP_PDEV handle
  6914. *
  6915. * Return: % error rate using retries per packet and success packets
  6916. */
  6917. static int dp_get_total_per(struct cdp_pdev *pdev_handle)
  6918. {
  6919. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6920. dp_aggregate_pdev_stats(pdev);
  6921. if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
  6922. return 0;
  6923. return ((pdev->stats.tx.retries * 100) /
  6924. ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
  6925. }
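/*
 * Worked example (illustrative): with pdev->stats.tx.retries == 25 and
 * pdev->stats.tx.tx_success.num == 75, dp_get_total_per() returns
 * (25 * 100) / (75 + 25) == 25, i.e. a 25% error rate. The early return
 * above avoids a divide-by-zero while both counters are still zero.
 */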
  6926. /*
  6927. * dp_txrx_stats_publish(): publish pdev stats into a buffer
  6928. * @pdev_handle: DP_PDEV handle
  6929. * @buf: to hold pdev_stats
  6930. *
  6931. * Return: int
  6932. */
  6933. static int
  6934. dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
  6935. {
  6936. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6937. struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
  6938. struct cdp_txrx_stats_req req = {0,};
  6939. dp_aggregate_pdev_stats(pdev);
  6940. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
  6941. req.cookie_val = 1;
  6942. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  6943. req.param1, req.param2, req.param3, 0,
  6944. req.cookie_val, 0);
  6945. msleep(DP_MAX_SLEEP_TIME);
  6946. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
  6947. req.cookie_val = 1;
  6948. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  6949. req.param1, req.param2, req.param3, 0,
  6950. req.cookie_val, 0);
  6951. msleep(DP_MAX_SLEEP_TIME);
  6952. qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
  6953. return TXRX_STATS_LEVEL;
  6954. }
  6955. /**
  6956. * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
  6957. * @pdev: DP_PDEV handle
  6958. * @map_id: ID of map that needs to be updated
  6959. * @tos: index value in map
  6960. * @tid: tid value passed by the user
  6961. *
  6962. * Return: void
  6963. */
  6964. static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
  6965. uint8_t map_id, uint8_t tos, uint8_t tid)
  6966. {
  6967. uint8_t dscp;
  6968. struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
  6969. struct dp_soc *soc = pdev->soc;
  6970. if (!soc)
  6971. return;
  6972. dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  6973. pdev->dscp_tid_map[map_id][dscp] = tid;
  6974. if (map_id < soc->num_hw_dscp_tid_map)
  6975. hal_tx_update_dscp_tid(soc->hal_soc, tid,
  6976. map_id, dscp);
  6977. return;
  6978. }
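/*
 * Illustrative sketch: the DSCP index used above is the upper six bits of
 * the IP TOS byte. Assuming DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f (values not confirmed here), a TOS of 0xb8
 * (DSCP 46, EF) maps as:
 *
 *   dscp = (0xb8 >> 2) & 0x3f;              result: 46
 *   pdev->dscp_tid_map[map_id][46] = tid;
 *
 * so EF-marked voice traffic can be steered to a high-priority TID.
 */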
  6979. /**
  6980. * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
  6981. * @pdev_handle: pdev handle
  6982. * @val: hmmc-dscp flag value
  6983. *
  6984. * Return: void
  6985. */
  6986. static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
  6987. bool val)
  6988. {
  6989. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6990. pdev->hmmc_tid_override_en = val;
  6991. }
  6992. /**
  6993. * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
  6994. * @pdev_handle: pdev handle
  6995. * @tid: tid value
  6996. *
  6997. * Return: void
  6998. */
  6999. static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
  7000. uint8_t tid)
  7001. {
  7002. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7003. pdev->hmmc_tid = tid;
  7004. }
  7005. /**
7006. * dp_fw_stats_process(): Process TxRx FW stats request
  7007. * @vdev_handle: DP VDEV handle
  7008. * @req: stats request
  7009. *
  7010. * return: int
  7011. */
  7012. static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
  7013. struct cdp_txrx_stats_req *req)
  7014. {
  7015. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7016. struct dp_pdev *pdev = NULL;
  7017. uint32_t stats = req->stats;
  7018. uint8_t mac_id = req->mac_id;
  7019. if (!vdev) {
  7020. DP_TRACE(NONE, "VDEV not found");
  7021. return 1;
  7022. }
  7023. pdev = vdev->pdev;
  7024. /*
7025. * For the HTT_DBG_EXT_STATS_RESET command, the FW expects param0
7026. * through param3 to be configured according to the rule below:
  7027. *
  7028. * PARAM:
  7029. * - config_param0 : start_offset (stats type)
  7030. * - config_param1 : stats bmask from start offset
  7031. * - config_param2 : stats bmask from start offset + 32
  7032. * - config_param3 : stats bmask from start offset + 64
  7033. */
  7034. if (req->stats == CDP_TXRX_STATS_0) {
  7035. req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
  7036. req->param1 = 0xFFFFFFFF;
  7037. req->param2 = 0xFFFFFFFF;
  7038. req->param3 = 0xFFFFFFFF;
  7039. } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
  7040. req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
  7041. }
  7042. return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
  7043. req->param1, req->param2, req->param3,
  7044. 0, 0, mac_id);
  7045. }
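/*
 * Worked example (illustrative): for CDP_TXRX_STATS_0 the block above
 * fills the request as
 *
 *   param0 = HTT_DBG_EXT_STATS_PDEV_TX      (start offset / stats type)
 *   param1 = param2 = param3 = 0xFFFFFFFF   (all stats in the group)
 *
 * which the firmware decodes exactly as described in the comment that
 * precedes this function.
 */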
  7046. /**
  7047. * dp_txrx_stats_request - function to map to firmware and host stats
  7048. * @vdev: virtual handle
  7049. * @req: stats request
  7050. *
  7051. * Return: QDF_STATUS
  7052. */
  7053. static
  7054. QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
  7055. struct cdp_txrx_stats_req *req)
  7056. {
  7057. int host_stats;
  7058. int fw_stats;
  7059. enum cdp_stats stats;
  7060. int num_stats;
  7061. if (!vdev || !req) {
  7062. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7063. "Invalid vdev/req instance");
  7064. return QDF_STATUS_E_INVAL;
  7065. }
  7066. stats = req->stats;
  7067. if (stats >= CDP_TXRX_MAX_STATS)
  7068. return QDF_STATUS_E_INVAL;
  7069. /*
  7070. * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
  7071. * has to be updated if new FW HTT stats added
  7072. */
  7073. if (stats > CDP_TXRX_STATS_HTT_MAX)
  7074. stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
  7075. num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
  7076. if (stats >= num_stats) {
  7077. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7078. "%s: Invalid stats option: %d", __func__, stats);
  7079. return QDF_STATUS_E_INVAL;
  7080. }
  7081. req->stats = stats;
  7082. fw_stats = dp_stats_mapping_table[stats][STATS_FW];
  7083. host_stats = dp_stats_mapping_table[stats][STATS_HOST];
  7084. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7085. "stats: %u fw_stats_type: %d host_stats: %d",
  7086. stats, fw_stats, host_stats);
  7087. if (fw_stats != TXRX_FW_STATS_INVALID) {
  7088. /* update request with FW stats type */
  7089. req->stats = fw_stats;
  7090. return dp_fw_stats_process(vdev, req);
  7091. }
  7092. if ((host_stats != TXRX_HOST_STATS_INVALID) &&
  7093. (host_stats <= TXRX_HOST_STATS_MAX))
  7094. return dp_print_host_stats(vdev, req);
  7095. else
  7096. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7097. "Wrong Input for TxRx Stats");
  7098. return QDF_STATUS_SUCCESS;
  7099. }
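/*
 * Usage sketch (illustrative, enum value assumed): a caller typically
 * builds a cdp_txrx_stats_req and lets dp_stats_mapping_table decide
 * whether the request is served by firmware or by host stats:
 *
 *   struct cdp_txrx_stats_req req = { 0 };
 *   req.stats = CDP_TXRX_STATS_1;   (assumed host-stats option)
 *   req.mac_id = 0;
 *   dp_txrx_stats_request((struct cdp_vdev *)vdev, &req);
 */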
  7100. /*
  7101. * dp_print_napi_stats(): NAPI stats
  7102. * @soc - soc handle
  7103. */
  7104. static void dp_print_napi_stats(struct dp_soc *soc)
  7105. {
  7106. hif_print_napi_stats(soc->hif_handle);
  7107. }
  7108. /*
  7109. * dp_print_per_ring_stats(): Packet count per ring
  7110. * @soc - soc handle
  7111. */
  7112. static void dp_print_per_ring_stats(struct dp_soc *soc)
  7113. {
  7114. uint8_t ring;
  7115. uint16_t core;
  7116. uint64_t total_packets;
  7117. DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
  7118. for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
  7119. total_packets = 0;
  7120. DP_TRACE_STATS(INFO_HIGH,
  7121. "Packets on ring %u:", ring);
  7122. for (core = 0; core < NR_CPUS; core++) {
  7123. DP_TRACE_STATS(INFO_HIGH,
  7124. "Packets arriving on core %u: %llu",
  7125. core,
  7126. soc->stats.rx.ring_packets[core][ring]);
  7127. total_packets += soc->stats.rx.ring_packets[core][ring];
  7128. }
  7129. DP_TRACE_STATS(INFO_HIGH,
  7130. "Total packets on ring %u: %llu",
  7131. ring, total_packets);
  7132. }
  7133. }
  7134. /*
7135. * dp_txrx_path_stats() - Function to display Tx/Rx path statistics
  7136. * @soc - soc handle
  7137. *
  7138. * return: none
  7139. */
  7140. static void dp_txrx_path_stats(struct dp_soc *soc)
  7141. {
  7142. uint8_t error_code;
  7143. uint8_t loop_pdev;
  7144. struct dp_pdev *pdev;
  7145. uint8_t i;
  7146. if (!soc) {
  7147. DP_TRACE(ERROR, "%s: Invalid access",
  7148. __func__);
  7149. return;
  7150. }
  7151. for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
  7152. pdev = soc->pdev_list[loop_pdev];
  7153. dp_aggregate_pdev_stats(pdev);
  7154. DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
  7155. DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
  7156. pdev->stats.tx_i.rcvd.num,
  7157. pdev->stats.tx_i.rcvd.bytes);
  7158. DP_TRACE_STATS(INFO_HIGH,
  7159. "processed from host: %u msdus (%llu bytes)",
  7160. pdev->stats.tx_i.processed.num,
  7161. pdev->stats.tx_i.processed.bytes);
  7162. DP_TRACE_STATS(INFO_HIGH,
  7163. "successfully transmitted: %u msdus (%llu bytes)",
  7164. pdev->stats.tx.tx_success.num,
  7165. pdev->stats.tx.tx_success.bytes);
  7166. DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
  7167. DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
  7168. pdev->stats.tx_i.dropped.dropped_pkt.num);
  7169. DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
  7170. pdev->stats.tx_i.dropped.desc_na.num);
  7171. DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
  7172. pdev->stats.tx_i.dropped.ring_full);
  7173. DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
  7174. pdev->stats.tx_i.dropped.enqueue_fail);
  7175. DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
  7176. pdev->stats.tx_i.dropped.dma_error);
  7177. DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
  7178. DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
  7179. pdev->stats.tx.tx_failed);
  7180. DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
  7181. pdev->stats.tx.dropped.age_out);
  7182. DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
  7183. pdev->stats.tx.dropped.fw_rem.num);
  7184. DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
  7185. pdev->stats.tx.dropped.fw_rem.bytes);
  7186. DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
  7187. pdev->stats.tx.dropped.fw_rem_tx);
  7188. DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
  7189. pdev->stats.tx.dropped.fw_rem_notx);
  7190. DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
  7191. pdev->soc->stats.tx.tx_invalid_peer.num);
  7192. DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
  7193. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7194. pdev->stats.tx_comp_histogram.pkts_1);
  7195. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7196. pdev->stats.tx_comp_histogram.pkts_2_20);
  7197. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7198. pdev->stats.tx_comp_histogram.pkts_21_40);
  7199. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7200. pdev->stats.tx_comp_histogram.pkts_41_60);
  7201. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7202. pdev->stats.tx_comp_histogram.pkts_61_80);
  7203. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7204. pdev->stats.tx_comp_histogram.pkts_81_100);
  7205. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7206. pdev->stats.tx_comp_histogram.pkts_101_200);
  7207. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7208. pdev->stats.tx_comp_histogram.pkts_201_plus);
  7209. DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
  7210. DP_TRACE_STATS(INFO_HIGH,
  7211. "delivered %u msdus ( %llu bytes),",
  7212. pdev->stats.rx.to_stack.num,
  7213. pdev->stats.rx.to_stack.bytes);
  7214. for (i = 0; i < CDP_MAX_RX_RINGS; i++)
  7215. DP_TRACE_STATS(INFO_HIGH,
  7216. "received on reo[%d] %u msdus( %llu bytes),",
  7217. i, pdev->stats.rx.rcvd_reo[i].num,
  7218. pdev->stats.rx.rcvd_reo[i].bytes);
  7219. DP_TRACE_STATS(INFO_HIGH,
  7220. "intra-bss packets %u msdus ( %llu bytes),",
  7221. pdev->stats.rx.intra_bss.pkts.num,
  7222. pdev->stats.rx.intra_bss.pkts.bytes);
  7223. DP_TRACE_STATS(INFO_HIGH,
  7224. "intra-bss fails %u msdus ( %llu bytes),",
  7225. pdev->stats.rx.intra_bss.fail.num,
  7226. pdev->stats.rx.intra_bss.fail.bytes);
  7227. DP_TRACE_STATS(INFO_HIGH,
  7228. "raw packets %u msdus ( %llu bytes),",
  7229. pdev->stats.rx.raw.num,
  7230. pdev->stats.rx.raw.bytes);
  7231. DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
  7232. pdev->stats.rx.err.mic_err);
  7233. DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
  7234. pdev->soc->stats.rx.err.rx_invalid_peer.num);
  7235. DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
  7236. DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
  7237. pdev->soc->stats.rx.err.invalid_rbm);
  7238. DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
  7239. pdev->soc->stats.rx.err.hal_ring_access_fail);
  7240. for (error_code = 0; error_code < HAL_REO_ERR_MAX;
  7241. error_code++) {
  7242. if (!pdev->soc->stats.rx.err.reo_error[error_code])
  7243. continue;
  7244. DP_TRACE_STATS(INFO_HIGH,
  7245. "Reo error number (%u): %u msdus",
  7246. error_code,
  7247. pdev->soc->stats.rx.err
  7248. .reo_error[error_code]);
  7249. }
  7250. for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
  7251. error_code++) {
  7252. if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
  7253. continue;
  7254. DP_TRACE_STATS(INFO_HIGH,
  7255. "Rxdma error number (%u): %u msdus",
  7256. error_code,
  7257. pdev->soc->stats.rx.err
  7258. .rxdma_error[error_code]);
  7259. }
  7260. DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
  7261. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7262. pdev->stats.rx_ind_histogram.pkts_1);
  7263. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7264. pdev->stats.rx_ind_histogram.pkts_2_20);
  7265. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7266. pdev->stats.rx_ind_histogram.pkts_21_40);
  7267. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7268. pdev->stats.rx_ind_histogram.pkts_41_60);
  7269. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7270. pdev->stats.rx_ind_histogram.pkts_61_80);
  7271. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7272. pdev->stats.rx_ind_histogram.pkts_81_100);
  7273. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7274. pdev->stats.rx_ind_histogram.pkts_101_200);
  7275. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7276. pdev->stats.rx_ind_histogram.pkts_201_plus);
  7277. DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
  7278. __func__,
  7279. pdev->soc->wlan_cfg_ctx
  7280. ->tso_enabled,
  7281. pdev->soc->wlan_cfg_ctx
  7282. ->lro_enabled,
  7283. pdev->soc->wlan_cfg_ctx
  7284. ->rx_hash,
  7285. pdev->soc->wlan_cfg_ctx
  7286. ->napi_enabled);
  7287. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7288. DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
  7289. __func__,
  7290. pdev->soc->wlan_cfg_ctx
  7291. ->tx_flow_stop_queue_threshold,
  7292. pdev->soc->wlan_cfg_ctx
  7293. ->tx_flow_start_queue_offset);
  7294. #endif
  7295. }
  7296. }
  7297. /*
  7298. * dp_txrx_dump_stats() - Dump statistics
7299. * @psoc - soc handle; @value - statistics option; @level - verbosity level
  7300. */
  7301. static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
  7302. enum qdf_stats_verbosity_level level)
  7303. {
  7304. struct dp_soc *soc =
  7305. (struct dp_soc *)psoc;
  7306. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7307. if (!soc) {
  7308. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7309. "%s: soc is NULL", __func__);
  7310. return QDF_STATUS_E_INVAL;
  7311. }
  7312. switch (value) {
  7313. case CDP_TXRX_PATH_STATS:
  7314. dp_txrx_path_stats(soc);
  7315. break;
  7316. case CDP_RX_RING_STATS:
  7317. dp_print_per_ring_stats(soc);
  7318. break;
  7319. case CDP_TXRX_TSO_STATS:
  7320. /* TODO: NOT IMPLEMENTED */
  7321. break;
  7322. case CDP_DUMP_TX_FLOW_POOL_INFO:
  7323. cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
  7324. break;
  7325. case CDP_DP_NAPI_STATS:
  7326. dp_print_napi_stats(soc);
  7327. break;
  7328. case CDP_TXRX_DESC_STATS:
  7329. /* TODO: NOT IMPLEMENTED */
  7330. break;
  7331. default:
  7332. status = QDF_STATUS_E_INVAL;
  7333. break;
  7334. }
  7335. return status;
  7336. }
  7337. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7338. /**
  7339. * dp_update_flow_control_parameters() - API to store datapath
  7340. * config parameters
  7341. * @soc: soc handle
7342. * @params: ini parameter handle
  7343. *
  7344. * Return: void
  7345. */
  7346. static inline
  7347. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7348. struct cdp_config_params *params)
  7349. {
  7350. soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
  7351. params->tx_flow_stop_queue_threshold;
  7352. soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
  7353. params->tx_flow_start_queue_offset;
  7354. }
  7355. #else
  7356. static inline
  7357. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7358. struct cdp_config_params *params)
  7359. {
  7360. }
  7361. #endif
  7362. /**
  7363. * dp_update_config_parameters() - API to store datapath
  7364. * config parameters
7365. * @psoc: cdp soc handle
7366. * @params: ini parameter handle
  7367. *
  7368. * Return: status
  7369. */
  7370. static
  7371. QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
  7372. struct cdp_config_params *params)
  7373. {
  7374. struct dp_soc *soc = (struct dp_soc *)psoc;
  7375. if (!(soc)) {
  7376. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7377. "%s: Invalid handle", __func__);
  7378. return QDF_STATUS_E_INVAL;
  7379. }
  7380. soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
  7381. soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
  7382. soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
  7383. soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
  7384. params->tcp_udp_checksumoffload;
  7385. soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
  7386. soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
  7387. dp_update_flow_control_parameters(soc, params);
  7388. return QDF_STATUS_SUCCESS;
  7389. }
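/*
 * Usage sketch (illustrative; field names taken from the assignments
 * above): the control path fills cdp_config_params from INI once at soc
 * init and hands it to this op:
 *
 *   struct cdp_config_params params = { 0 };
 *   params.tso_enable = 1;
 *   params.napi_enable = 1;
 *   params.tcp_udp_checksumoffload = 1;
 *   dp_update_config_parameters((struct cdp_soc *)soc, &params);
 */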
  7390. /**
7391. * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy of a vdev
7392. * (applied to the bss peer in AP mode, to the first peer in STA mode)
7393. * @vdev_handle - datapath vdev handle
7394. * @val - WDS rx policy bitmap (WDS_POLICY_RX_UCAST_4ADDR / _MCAST_4ADDR)
7395. *
7396. * Return: void
  7397. */
  7398. #ifdef WDS_VENDOR_EXTENSION
  7399. void
  7400. dp_txrx_set_wds_rx_policy(
  7401. struct cdp_vdev *vdev_handle,
  7402. u_int32_t val)
  7403. {
  7404. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7405. struct dp_peer *peer;
  7406. if (vdev->opmode == wlan_op_mode_ap) {
  7407. /* for ap, set it on bss_peer */
  7408. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  7409. if (peer->bss_peer) {
  7410. peer->wds_ecm.wds_rx_filter = 1;
  7411. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7412. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7413. break;
  7414. }
  7415. }
  7416. } else if (vdev->opmode == wlan_op_mode_sta) {
  7417. peer = TAILQ_FIRST(&vdev->peer_list);
  7418. peer->wds_ecm.wds_rx_filter = 1;
  7419. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7420. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7421. }
  7422. }
  7423. /**
  7424. * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
  7425. *
  7426. * @peer_handle - datapath peer handle
  7427. * @wds_tx_ucast: policy for unicast transmission
  7428. * @wds_tx_mcast: policy for multicast transmission
  7429. *
  7430. * Return: void
  7431. */
  7432. void
  7433. dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
  7434. int wds_tx_ucast, int wds_tx_mcast)
  7435. {
  7436. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7437. if (wds_tx_ucast || wds_tx_mcast) {
  7438. peer->wds_enabled = 1;
  7439. peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
  7440. peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
  7441. } else {
  7442. peer->wds_enabled = 0;
  7443. peer->wds_ecm.wds_tx_ucast_4addr = 0;
  7444. peer->wds_ecm.wds_tx_mcast_4addr = 0;
  7445. }
  7446. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7447. FL("Policy Update set to :\
  7448. peer->wds_enabled %d\
  7449. peer->wds_ecm.wds_tx_ucast_4addr %d\
  7450. peer->wds_ecm.wds_tx_mcast_4addr %d"),
  7451. peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
  7452. peer->wds_ecm.wds_tx_mcast_4addr);
  7453. return;
  7454. }
  7455. #endif
  7456. static struct cdp_wds_ops dp_ops_wds = {
  7457. .vdev_set_wds = dp_vdev_set_wds,
  7458. #ifdef WDS_VENDOR_EXTENSION
  7459. .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
  7460. .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
  7461. #endif
  7462. };
  7463. /*
  7464. * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
  7465. * @vdev_handle - datapath vdev handle
  7466. * @callback - callback function
  7467. * @ctxt: callback context
  7468. *
  7469. */
  7470. static void
  7471. dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
  7472. ol_txrx_data_tx_cb callback, void *ctxt)
  7473. {
  7474. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7475. vdev->tx_non_std_data_callback.func = callback;
  7476. vdev->tx_non_std_data_callback.ctxt = ctxt;
  7477. }
  7478. /**
  7479. * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
  7480. * @pdev_hdl: datapath pdev handle
  7481. *
  7482. * Return: opaque pointer to dp txrx handle
  7483. */
  7484. static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
  7485. {
  7486. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7487. return pdev->dp_txrx_handle;
  7488. }
  7489. /**
  7490. * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
  7491. * @pdev_hdl: datapath pdev handle
  7492. * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
  7493. *
  7494. * Return: void
  7495. */
  7496. static void
  7497. dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
  7498. {
  7499. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7500. pdev->dp_txrx_handle = dp_txrx_hdl;
  7501. }
  7502. /**
  7503. * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
  7504. * @soc_handle: datapath soc handle
  7505. *
  7506. * Return: opaque pointer to external dp (non-core DP)
  7507. */
  7508. static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
  7509. {
  7510. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7511. return soc->external_txrx_handle;
  7512. }
  7513. /**
  7514. * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
  7515. * @soc_handle: datapath soc handle
  7516. * @txrx_handle: opaque pointer to external dp (non-core DP)
  7517. *
  7518. * Return: void
  7519. */
  7520. static void
  7521. dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
  7522. {
  7523. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7524. soc->external_txrx_handle = txrx_handle;
  7525. }
  7526. /**
  7527. * dp_get_cfg_capabilities() - get dp capabilities
  7528. * @soc_handle: datapath soc handle
  7529. * @dp_caps: enum for dp capabilities
  7530. *
  7531. * Return: bool to determine if dp caps is enabled
  7532. */
  7533. static bool
  7534. dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
  7535. enum cdp_capabilities dp_caps)
  7536. {
  7537. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7538. return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
  7539. }
  7540. #ifdef FEATURE_AST
  7541. static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  7542. {
  7543. struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
  7544. struct dp_peer *peer = (struct dp_peer *) peer_hdl;
  7545. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7546. /*
7547. * For a BSS peer, a new peer is not created on alloc_node if a peer
7548. * with the same address already exists; instead the refcnt of the
7549. * existing peer is increased. Correspondingly, in the delete path
7550. * only the refcnt is decreased, and the peer is deleted only when all
7551. * references are gone. So delete_in_progress should not be set for the
7552. * bss_peer unless only 2 references remain (the peer map reference
7553. * and the peer hash table reference).
  7554. */
  7555. if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
  7556. return;
  7557. }
  7558. peer->delete_in_progress = true;
  7559. dp_peer_delete_ast_entries(soc, peer);
  7560. }
  7561. #endif
  7562. #ifdef ATH_SUPPORT_NAC_RSSI
  7563. /**
7564. * dp_vdev_get_neighbour_rssi(): Get RSSI of a configured NAC neighbour peer
7565. * @vdev_hdl: DP vdev handle, @mac_addr: neighbour peer mac address
7566. * @rssi: pointer to hold the rssi value
  7567. *
  7568. * Return: 0 for success. nonzero for failure.
  7569. */
  7570. QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
  7571. char *mac_addr,
  7572. uint8_t *rssi)
  7573. {
  7574. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  7575. struct dp_pdev *pdev = vdev->pdev;
  7576. struct dp_neighbour_peer *peer = NULL;
  7577. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  7578. *rssi = 0;
  7579. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  7580. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  7581. neighbour_peer_list_elem) {
  7582. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  7583. mac_addr, DP_MAC_ADDR_LEN) == 0) {
  7584. *rssi = peer->rssi;
  7585. status = QDF_STATUS_SUCCESS;
  7586. break;
  7587. }
  7588. }
  7589. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  7590. return status;
  7591. }
  7592. static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
  7593. enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
  7594. uint8_t chan_num)
  7595. {
  7596. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7597. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  7598. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7599. pdev->nac_rssi_filtering = 1;
  7600. /* Store address of NAC (neighbour peer) which will be checked
  7601. * against TA of received packets.
  7602. */
  7603. if (cmd == CDP_NAC_PARAM_ADD) {
  7604. dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
  7605. client_macaddr);
  7606. } else if (cmd == CDP_NAC_PARAM_DEL) {
  7607. dp_update_filter_neighbour_peers(vdev_handle,
  7608. DP_NAC_PARAM_DEL,
  7609. client_macaddr);
  7610. }
  7611. if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
  7612. soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
  7613. ((void *)vdev->pdev->ctrl_pdev,
  7614. vdev->vdev_id, cmd, bssid);
  7615. return QDF_STATUS_SUCCESS;
  7616. }
  7617. #endif
  7618. /**
  7619. * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
  7620. * for pktlog
7621. * @txrx_pdev_handle: cdp_pdev handle, @mac_addr: peer mac address
7622. * @enb_dsb: Enable or disable peer based filtering
  7623. *
  7624. * Return: QDF_STATUS
  7625. */
  7626. static int
  7627. dp_enable_peer_based_pktlog(
  7628. struct cdp_pdev *txrx_pdev_handle,
  7629. char *mac_addr, uint8_t enb_dsb)
  7630. {
  7631. struct dp_peer *peer;
  7632. uint8_t local_id;
  7633. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
  7634. peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
  7635. mac_addr, &local_id);
  7636. if (!peer) {
  7637. dp_err("Invalid Peer");
  7638. return QDF_STATUS_E_FAILURE;
  7639. }
  7640. peer->peer_based_pktlog_filter = enb_dsb;
  7641. pdev->dp_peer_based_pktlog = enb_dsb;
  7642. return QDF_STATUS_SUCCESS;
  7643. }
  7644. static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
  7645. uint32_t max_peers,
  7646. bool peer_map_unmap_v2)
  7647. {
  7648. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  7649. soc->max_peers = max_peers;
  7650. qdf_print ("%s max_peers %u\n", __func__, max_peers);
  7651. if (dp_peer_find_attach(soc))
  7652. return QDF_STATUS_E_FAILURE;
  7653. soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
  7654. return QDF_STATUS_SUCCESS;
  7655. }
  7656. /**
  7657. * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
  7658. * @dp_pdev: dp pdev handle
  7659. * @ctrl_pdev: UMAC ctrl pdev handle
  7660. *
  7661. * Return: void
  7662. */
  7663. static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
  7664. struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
  7665. {
  7666. struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
  7667. pdev->ctrl_pdev = ctrl_pdev;
  7668. }
  7669. /*
  7670. * dp_get_cfg() - get dp cfg
  7671. * @soc: cdp soc handle
  7672. * @cfg: cfg enum
  7673. *
  7674. * Return: cfg value
  7675. */
  7676. static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
  7677. {
  7678. struct dp_soc *dpsoc = (struct dp_soc *)soc;
  7679. uint32_t value = 0;
  7680. switch (cfg) {
  7681. case cfg_dp_enable_data_stall:
  7682. value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
  7683. break;
  7684. case cfg_dp_enable_ip_tcp_udp_checksum_offload:
  7685. value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
  7686. break;
  7687. case cfg_dp_tso_enable:
  7688. value = dpsoc->wlan_cfg_ctx->tso_enabled;
  7689. break;
  7690. case cfg_dp_lro_enable:
  7691. value = dpsoc->wlan_cfg_ctx->lro_enabled;
  7692. break;
  7693. case cfg_dp_gro_enable:
  7694. value = dpsoc->wlan_cfg_ctx->gro_enabled;
  7695. break;
  7696. case cfg_dp_tx_flow_start_queue_offset:
  7697. value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
  7698. break;
  7699. case cfg_dp_tx_flow_stop_queue_threshold:
  7700. value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
  7701. break;
  7702. case cfg_dp_disable_intra_bss_fwd:
  7703. value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
  7704. break;
  7705. default:
  7706. value = 0;
  7707. }
  7708. return value;
  7709. }
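/*
 * Example (illustrative): upper layers query these cached configuration
 * values through the cdp wrapper instead of touching wlan_cfg_ctx
 * directly, e.g.
 *
 *   uint32_t tso_on = dp_get_cfg((void *)soc, cfg_dp_tso_enable);
 *
 * Unknown enum values fall through to the default case and return 0.
 */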
  7710. static struct cdp_cmn_ops dp_ops_cmn = {
  7711. .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
  7712. .txrx_vdev_attach = dp_vdev_attach_wifi3,
  7713. .txrx_vdev_detach = dp_vdev_detach_wifi3,
  7714. .txrx_pdev_attach = dp_pdev_attach_wifi3,
  7715. .txrx_pdev_detach = dp_pdev_detach_wifi3,
  7716. .txrx_pdev_deinit = dp_pdev_deinit_wifi3,
  7717. .txrx_peer_create = dp_peer_create_wifi3,
  7718. .txrx_peer_setup = dp_peer_setup_wifi3,
  7719. #ifdef FEATURE_AST
  7720. .txrx_peer_teardown = dp_peer_teardown_wifi3,
  7721. #else
  7722. .txrx_peer_teardown = NULL,
  7723. #endif
  7724. .txrx_peer_add_ast = dp_peer_add_ast_wifi3,
  7725. .txrx_peer_del_ast = dp_peer_del_ast_wifi3,
  7726. .txrx_peer_update_ast = dp_peer_update_ast_wifi3,
  7727. .txrx_peer_ast_hash_find_soc = dp_peer_ast_hash_find_soc_wifi3,
  7728. .txrx_peer_ast_hash_find_by_pdevid =
  7729. dp_peer_ast_hash_find_by_pdevid_wifi3,
  7730. .txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
  7731. .txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
  7732. .txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
  7733. .txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
  7734. .txrx_peer_ast_get_peer = dp_peer_ast_get_peer_wifi3,
  7735. .txrx_peer_ast_get_nexthop_peer_id =
  7736. dp_peer_ast_get_nexhop_peer_id_wifi3,
  7737. #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
  7738. .txrx_peer_ast_set_cp_ctx = dp_peer_ast_set_cp_ctx_wifi3,
  7739. .txrx_peer_ast_get_cp_ctx = dp_peer_ast_get_cp_ctx_wifi3,
  7740. .txrx_peer_ast_get_wmi_sent = dp_peer_ast_get_wmi_sent_wifi3,
  7741. .txrx_peer_ast_free_entry = dp_peer_ast_free_entry_wifi3,
  7742. #endif
  7743. .txrx_peer_delete = dp_peer_delete_wifi3,
  7744. .txrx_vdev_register = dp_vdev_register_wifi3,
  7745. .txrx_soc_detach = dp_soc_detach_wifi3,
  7746. .txrx_soc_deinit = dp_soc_deinit_wifi3,
  7747. .txrx_soc_init = dp_soc_init_wifi3,
  7748. .txrx_tso_soc_attach = dp_tso_soc_attach,
  7749. .txrx_tso_soc_detach = dp_tso_soc_detach,
  7750. .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
  7751. .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
  7752. .txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
  7753. .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
  7754. .txrx_ath_getstats = dp_get_device_stats,
  7755. .addba_requestprocess = dp_addba_requestprocess_wifi3,
  7756. .addba_responsesetup = dp_addba_responsesetup_wifi3,
  7757. .addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
  7758. .delba_process = dp_delba_process_wifi3,
  7759. .set_addba_response = dp_set_addba_response,
  7760. .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
  7761. .flush_cache_rx_queue = NULL,
  7762. /* TODO: get API's for dscp-tid need to be added*/
  7763. .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
  7764. .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
  7765. .hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
  7766. .set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
  7767. .txrx_get_total_per = dp_get_total_per,
  7768. .txrx_stats_request = dp_txrx_stats_request,
  7769. .txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
  7770. .txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
  7771. .txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
  7772. .txrx_set_nac = dp_set_nac,
  7773. .txrx_get_tx_pending = dp_get_tx_pending,
  7774. .txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
  7775. .txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
  7776. .display_stats = dp_txrx_dump_stats,
  7777. .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
  7778. .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
  7779. .txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
  7780. .txrx_intr_detach = dp_soc_interrupt_detach,
  7781. .set_pn_check = dp_set_pn_check_wifi3,
  7782. .update_config_parameters = dp_update_config_parameters,
  7783. /* TODO: Add other functions */
  7784. .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
  7785. .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
  7786. .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
  7787. .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
  7788. .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
  7789. .txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
  7790. .txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
  7791. .tx_send = dp_tx_send,
  7792. .txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
  7793. .txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
  7794. .txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
  7795. .txrx_peer_map_attach = dp_peer_map_attach_wifi3,
  7796. .txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
  7797. .txrx_get_os_rx_handles_from_vdev =
  7798. dp_get_os_rx_handles_from_vdev_wifi3,
  7799. .delba_tx_completion = dp_delba_tx_completion_wifi3,
  7800. .get_dp_capabilities = dp_get_cfg_capabilities,
  7801. .txrx_get_cfg = dp_get_cfg,
  7802. };
  7803. static struct cdp_ctrl_ops dp_ops_ctrl = {
  7804. .txrx_peer_authorize = dp_peer_authorize,
  7805. .txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
  7806. .txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
  7807. #ifdef MESH_MODE_SUPPORT
  7808. .txrx_set_mesh_mode = dp_peer_set_mesh_mode,
  7809. .txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
  7810. #endif
  7811. .txrx_set_vdev_param = dp_set_vdev_param,
  7812. .txrx_peer_set_nawds = dp_peer_set_nawds,
  7813. .txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
  7814. .txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
  7815. .txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
  7816. .txrx_update_filter_neighbour_peers =
  7817. dp_update_filter_neighbour_peers,
  7818. .txrx_get_sec_type = dp_get_sec_type,
  7819. /* TODO: Add other functions */
  7820. .txrx_wdi_event_sub = dp_wdi_event_sub,
  7821. .txrx_wdi_event_unsub = dp_wdi_event_unsub,
  7822. #ifdef WDI_EVENT_ENABLE
  7823. .txrx_get_pldev = dp_get_pldev,
  7824. #endif
  7825. .txrx_set_pdev_param = dp_set_pdev_param,
  7826. #ifdef ATH_SUPPORT_NAC_RSSI
  7827. .txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
  7828. .txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
  7829. #endif
  7830. .set_key = dp_set_michael_key,
  7831. .txrx_get_vdev_param = dp_get_vdev_param,
  7832. .enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
  7833. };
  7834. static struct cdp_me_ops dp_ops_me = {
  7835. #ifdef ATH_SUPPORT_IQUE
  7836. .tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
  7837. .tx_me_free_descriptor = dp_tx_me_free_descriptor,
  7838. .tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
  7839. #endif
  7840. .tx_me_find_ast_entry = NULL,
  7841. };
  7842. static struct cdp_mon_ops dp_ops_mon = {
  7843. .txrx_monitor_set_filter_ucast_data = NULL,
  7844. .txrx_monitor_set_filter_mcast_data = NULL,
  7845. .txrx_monitor_set_filter_non_data = NULL,
  7846. .txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
  7847. .txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
  7848. .txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
  7849. .txrx_reset_monitor_mode = dp_reset_monitor_mode,
  7850. /* Added support for HK advance filter */
  7851. .txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
  7852. };
  7853. static struct cdp_host_stats_ops dp_ops_host_stats = {
  7854. .txrx_per_peer_stats = dp_get_host_peer_stats,
  7855. .get_fw_peer_stats = dp_get_fw_peer_stats,
  7856. .get_htt_stats = dp_get_htt_stats,
  7857. .txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
  7858. .txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
  7859. .txrx_stats_publish = dp_txrx_stats_publish,
  7860. .txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
  7861. .txrx_get_peer_stats = dp_txrx_get_peer_stats,
  7862. .txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
  7863. .txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
  7864. /* TODO */
  7865. };
  7866. static struct cdp_raw_ops dp_ops_raw = {
  7867. /* TODO */
  7868. };
  7869. #ifdef CONFIG_WIN
  7870. static struct cdp_pflow_ops dp_ops_pflow = {
  7871. /* TODO */
  7872. };
  7873. #endif /* CONFIG_WIN */
  7874. #ifdef FEATURE_RUNTIME_PM
  7875. /**
  7876. * dp_runtime_suspend() - ensure DP is ready to runtime suspend
  7877. * @opaque_pdev: DP pdev context
  7878. *
  7879. * DP is ready to runtime suspend if there are no pending TX packets.
  7880. *
  7881. * Return: QDF_STATUS
  7882. */
  7883. static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
  7884. {
  7885. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  7886. struct dp_soc *soc = pdev->soc;
  7887. /* Abort if there are any pending TX packets */
  7888. if (dp_get_tx_pending(opaque_pdev) > 0) {
  7889. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7890. FL("Abort suspend due to pending TX packets"));
  7891. return QDF_STATUS_E_AGAIN;
  7892. }
  7893. if (soc->intr_mode == DP_INTR_POLL)
  7894. qdf_timer_stop(&soc->int_timer);
  7895. return QDF_STATUS_SUCCESS;
  7896. }
  7897. /**
  7898. * dp_runtime_resume() - ensure DP is ready to runtime resume
  7899. * @opaque_pdev: DP pdev context
  7900. *
  7901. * Resume DP for runtime PM.
  7902. *
  7903. * Return: QDF_STATUS
  7904. */
  7905. static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
  7906. {
  7907. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  7908. struct dp_soc *soc = pdev->soc;
  7909. void *hal_srng;
  7910. int i;
  7911. if (soc->intr_mode == DP_INTR_POLL)
  7912. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  7913. for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
  7914. hal_srng = soc->tcl_data_ring[i].hal_srng;
  7915. if (hal_srng) {
  7916. /* We actually only need to acquire the lock */
  7917. hal_srng_access_start(soc->hal_soc, hal_srng);
  7918. /* Update SRC ring head pointer for HW to send
  7919. all pending packets */
  7920. hal_srng_access_end(soc->hal_soc, hal_srng);
  7921. }
  7922. }
  7923. return QDF_STATUS_SUCCESS;
  7924. }
  7925. #endif /* FEATURE_RUNTIME_PM */
  7926. static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
  7927. {
  7928. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  7929. struct dp_soc *soc = pdev->soc;
  7930. if (soc->intr_mode == DP_INTR_POLL)
  7931. qdf_timer_stop(&soc->int_timer);
  7932. return QDF_STATUS_SUCCESS;
  7933. }
  7934. static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
  7935. {
  7936. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  7937. struct dp_soc *soc = pdev->soc;
  7938. if (soc->intr_mode == DP_INTR_POLL)
  7939. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  7940. return QDF_STATUS_SUCCESS;
  7941. }
  7942. #ifndef CONFIG_WIN
  7943. static struct cdp_misc_ops dp_ops_misc = {
  7944. .tx_non_std = dp_tx_non_std,
  7945. .get_opmode = dp_get_opmode,
  7946. #ifdef FEATURE_RUNTIME_PM
  7947. .runtime_suspend = dp_runtime_suspend,
  7948. .runtime_resume = dp_runtime_resume,
  7949. #endif /* FEATURE_RUNTIME_PM */
  7950. .pkt_log_init = dp_pkt_log_init,
  7951. .pkt_log_con_service = dp_pkt_log_con_service,
  7952. };
  7953. static struct cdp_flowctl_ops dp_ops_flowctl = {
  7954. /* WIFI 3.0 DP implement as required. */
  7955. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7956. .flow_pool_map_handler = dp_tx_flow_pool_map,
  7957. .flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
  7958. .register_pause_cb = dp_txrx_register_pause_cb,
  7959. .dump_flow_pool_info = dp_tx_dump_flow_pool_info,
  7960. #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
  7961. };
  7962. static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
  7963. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  7964. };
  7965. #ifdef IPA_OFFLOAD
  7966. static struct cdp_ipa_ops dp_ops_ipa = {
  7967. .ipa_get_resource = dp_ipa_get_resource,
  7968. .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
  7969. .ipa_op_response = dp_ipa_op_response,
  7970. .ipa_register_op_cb = dp_ipa_register_op_cb,
  7971. .ipa_get_stat = dp_ipa_get_stat,
  7972. .ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
  7973. .ipa_enable_autonomy = dp_ipa_enable_autonomy,
  7974. .ipa_disable_autonomy = dp_ipa_disable_autonomy,
  7975. .ipa_setup = dp_ipa_setup,
  7976. .ipa_cleanup = dp_ipa_cleanup,
  7977. .ipa_setup_iface = dp_ipa_setup_iface,
  7978. .ipa_cleanup_iface = dp_ipa_cleanup_iface,
  7979. .ipa_enable_pipes = dp_ipa_enable_pipes,
  7980. .ipa_disable_pipes = dp_ipa_disable_pipes,
  7981. .ipa_set_perf_level = dp_ipa_set_perf_level
  7982. };
  7983. #endif
  7984. static struct cdp_bus_ops dp_ops_bus = {
  7985. .bus_suspend = dp_bus_suspend,
  7986. .bus_resume = dp_bus_resume
  7987. };
  7988. static struct cdp_ocb_ops dp_ops_ocb = {
  7989. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  7990. };
  7991. static struct cdp_throttle_ops dp_ops_throttle = {
  7992. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  7993. };
  7994. static struct cdp_mob_stats_ops dp_ops_mob_stats = {
  7995. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  7996. };
  7997. static struct cdp_cfg_ops dp_ops_cfg = {
  7998. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  7999. };
  8000. /*
8001. * dp_peer_get_ref_find_by_addr - find a peer by MAC address and take a reference
  8002. * @dev: physical device instance
  8003. * @peer_mac_addr: peer mac address
  8004. * @local_id: local id for the peer
  8005. * @debug_id: to track enum peer access
  8006. *
  8007. * Return: peer instance pointer
  8008. */
  8009. static inline void *
  8010. dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
  8011. uint8_t *local_id,
  8012. enum peer_debug_id_type debug_id)
  8013. {
  8014. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  8015. struct dp_peer *peer;
  8016. peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
  8017. if (!peer)
  8018. return NULL;
  8019. *local_id = peer->local_id;
  8020. DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
  8021. return peer;
  8022. }
/*
 * dp_peer_release_ref() - release a reference taken on a peer
 * @peer: peer handle
 * @debug_id: peer debug id used to track the reference being dropped
 *
 * Return: None
 */
static inline
void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
{
	dp_peer_unref_delete(peer);
}
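/*
 * Illustrative sketch (not part of the driver): lookups made through the
 * get-ref helper above are expected to be paired with dp_peer_release_ref()
 * once the caller is done with the peer, along the lines of
 *
 *	uint8_t local_id;
 *	void *peer;
 *
 *	peer = dp_peer_get_ref_find_by_addr(pdev_handle, mac_addr,
 *					    &local_id, debug_id);
 *	if (peer) {
 *		// ... use the peer while the reference is held ...
 *		dp_peer_release_ref(peer, debug_id);
 *	}
 *
 * pdev_handle, mac_addr and debug_id are placeholders for whatever the
 * caller already has in hand.
 */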
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
	.peer_release_ref = dp_peer_release_ref,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
};
#endif
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
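/*
 * Illustrative sketch (not part of the driver): callers do not invoke the
 * static handlers above directly; they reach them through the cdp_ops table
 * that dp_soc_attach() installs on the SOC, roughly
 *
 *	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
 *
 *	if (cdp_soc->ops->bus_ops && cdp_soc->ops->bus_ops->bus_suspend)
 *		cdp_soc->ops->bus_ops->bus_suspend(pdev_handle);
 *
 * The real dispatch wrappers live in the cdp layer; this only shows how
 * dp_txrx_ops is consumed once it is hooked up below.
 */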
/*
 * dp_soc_set_txrx_ring_map() - fill the default tx ring map for the SOC
 * @soc: DP handler for soc
 *
 * Return: Void
 */
static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
{
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		soc->tx_ring_map[i] =
			dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
	}
}
#ifdef QCA_WIFI_QCA8074
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);
	if (!dp_soc)
		return NULL;

	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
		return NULL;

	return (void *)dp_soc;
}
#else
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);

	return (void *)dp_soc;
}
#endif
/**
 * dp_soc_attach() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id)
{
	int int_ctx;
	struct dp_soc *soc = NULL;
	struct htt_soc *htt_soc = NULL;

	soc = qdf_mem_malloc(sizeof(*soc));
	if (!soc) {
		dp_err("DP SOC memory allocation failed");
		goto fail0;
	}

	int_ctx = 0;
	soc->device_id = device_id;
	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed\n");
		goto fail1;
	}

	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
	if (!htt_soc) {
		dp_err("HTT attach failed");
		goto fail1;
	}
	soc->htt_handle = htt_soc;
	htt_soc->dp_soc = soc;
	htt_soc->htc_soc = htc_handle;

	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	return (void *)soc;
fail2:
	qdf_mem_free(htt_soc);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
/**
 * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)dpsoc;
	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;

	htt_soc->htc_soc = htc_handle;
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		return NULL;

	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
			   soc->hal_soc, soc->osdev);
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		break;
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
#endif
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL) {
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
		}

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map */
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return soc;
}
/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (Unused)
 * @ol_ops: Offload Operations (Unused)
 * @device_id: Device ID (Unused)
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	return dp_soc_init(dpsoc, htc_handle, hif_handle);
}
#endif
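/*
 * Illustrative sketch (not part of the driver): with QCA_MEM_ATTACH_ON_WIFI3
 * the SOC is brought up in two steps, attach first and init later once the
 * HIF handle is available, roughly
 *
 *	void *dp_soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *					   qdf_osdev, ol_ops, device_id);
 *
 *	if (dp_soc)
 *		dp_soc = dp_soc_init_wifi3(dp_soc, ctrl_psoc, hif_handle,
 *					   htc_handle, qdf_osdev, ol_ops,
 *					   device_id);
 *
 * Without that flag, dp_soc_attach_wifi3() performs both steps itself.
 */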
/*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to the MAC id
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return soc->pdev_list[mac_id];

	/* Typically for MCL, as there is only one PDEV */
	return soc->pdev_list[0];
}
/*
 * dp_is_hw_dbs_enable() - Check if DBS is supported and clamp the MAC ring
 *			   count accordingly
 * @soc: DP SoC context
 * @max_mac_rings: pointer to the number of MAC rings; reduced to 1 in place
 *		   when the target is not DBS 2x2 capable
 *
 * Return: None
 */
static
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings)
{
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable(soc->ctrl_psoc);

	*max_mac_rings = (dbs_enable) ? (*max_mac_rings) : 1;
}
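/*
 * Illustrative sketch (not part of the driver): because dp_is_hw_dbs_enable()
 * updates the count in place, a typical caller passes in the configured value
 * and then works with the (possibly reduced) result, e.g.
 *
 *	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
 *
 *	dp_is_hw_dbs_enable(soc, &max_mac_rings);
 *	// max_mac_rings keeps its configured value on DBS 2x2 capable
 *	// targets and is forced to 1 otherwise
 *
 * dp_set_pktlog_wifi3() below follows exactly this pattern.
 */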
/*
 * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Return: 0 on success
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	soc = pdev->soc;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);
	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE,
					 &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE_PKTLOG_LITE,
					 &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					  mac_for_pdev,
					  pdev->rxdma_mon_status_ring[mac_id]
					  .hal_srng,
					  RXDMA_MONITOR_STATUS,
					  RX_BUFFER_SIZE,
					  &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
			 * in FW. Switch to the proper macros once they are
			 * defined in the HTT header file.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}
			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
#endif
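/*
 * Illustrative sketch (not part of the driver): the pktlog/WDI glue is
 * expected to toggle a given event through dp_set_pktlog_wifi3(), pairing
 * each enable with a matching disable, e.g.
 *
 *	// subscribe this pdev to lite rx pktlog
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	// and unsubscribe when pktlog is torn down
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 *
 * The actual call sites live in the pktlog layer; this only shows the
 * intended enable/disable pairing.
 */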