dp_main.c

/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "qdf_mem.h" /* qdf_mem_malloc, qdf_mem_free */
#include "cfg_ucfg_api.h"

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
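/*
 * Flow-pool dumping is only available with Tx flow control v2; provide a
 * no-op stub so callers compile when the feature is disabled.
 */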
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
    return;
}
#endif

#include "dp_ipa.h"
#include "dp_cal_client_api.h"

#ifdef CONFIG_MCL
extern int con_mode_monitor;
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
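/* Forward declarations of functions defined later in this file */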
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
              struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
                                  uint8_t *peer_mac_addr,
                                  struct cdp_ctrl_objmgr_peer *ctrl_peer);
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);

#define DP_INTR_POLL_TIMER_MS 10
/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS 1000
/* WDS AST entry aging timer value */
#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS 120000
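/*
 * Number of DP_AST_AGING_TIMER_DEFAULT_MS ticks that make up one WDS AST
 * aging interval.
 */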
#define DP_WDS_AST_AGING_TIMER_CNT \
    ((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)

#define DP_MCS_LENGTH (6*MAX_MCS)
#define DP_NSS_LENGTH (6*SS_COUNT)
#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
#define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
#define DP_MAX_MCS_STRING_LEN 30
#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL 0xb
#define RX_RING_MASK_VAL 0x7
#else
#define TX_RING_MASK_VAL 0xF
#define RX_RING_MASK_VAL 0xF
#endif

#define STR_MAXLEN 64

#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR 0x2000
/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
                                   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
                                      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

#define RNG_ERR "SRNG setup failed for"

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP   TID
 * 000000  0
 * 001000  1
 * 010000  2
 * 011000  3
 * 100000  4
 * 101000  5
 * 110000  6
 * 111000  7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
    0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7,
};

/*
 * struct dp_rate_debug
 *
 * @mcs_type: print string for a given mcs
 * @valid: valid mcs rate?
 */
struct dp_rate_debug {
    char mcs_type[DP_MAX_MCS_STRING_LEN];
    uint8_t valid;
};

#define MCS_VALID 1
#define MCS_INVALID 0

static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
    {
        {"OFDM 48 Mbps", MCS_VALID},
        {"OFDM 24 Mbps", MCS_VALID},
        {"OFDM 12 Mbps", MCS_VALID},
        {"OFDM 6 Mbps ", MCS_VALID},
        {"OFDM 54 Mbps", MCS_VALID},
        {"OFDM 36 Mbps", MCS_VALID},
        {"OFDM 18 Mbps", MCS_VALID},
        {"OFDM 9 Mbps ", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"CCK 11 Mbps Long  ", MCS_VALID},
        {"CCK 5.5 Mbps Long ", MCS_VALID},
        {"CCK 2 Mbps Long   ", MCS_VALID},
        {"CCK 1 Mbps Long   ", MCS_VALID},
        {"CCK 11 Mbps Short ", MCS_VALID},
        {"CCK 5.5 Mbps Short", MCS_VALID},
        {"CCK 2 Mbps Short  ", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
        {"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
        {"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
        {"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
        {"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
        {"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
        {"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
        {"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
        {"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
        {"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
        {"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
        {"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
        {"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
        {"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
        {"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
        {"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
        {"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
        {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
        {"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
        {"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
        {"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
        {"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
        {"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
        {"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
        {"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
        {"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
        {"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
        {"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
        {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
        {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_VALID},
    }
};

/**
 * dp_cpu_ring_map_type - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
    DP_NSS_DEFAULT_MAP,
    DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
    DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
    DP_NSS_DBDC_OFFLOADED_MAP,
    DP_NSS_DBTC_OFFLOADED_MAP,
    DP_NSS_CPU_RING_MAP_MAX
};

/**
 * @brief Cpu to tx ring map
 */
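/*
 * Each row corresponds to a dp_cpu_ring_map_types entry; each column gives
 * the Tx ring selected for the corresponding interrupt/CPU context.
 */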
#ifdef CONFIG_WIN
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
    {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
    {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
    {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
    {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
    {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#else
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
    {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
    {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
    {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
    {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
    {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#endif

/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
    STATS_FW = 0,
    STATS_HOST = 1,
    STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 *
 */
enum dp_fw_stats {
    TXRX_FW_STATS_INVALID = -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
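/*
 * Column STATS_FW holds the HTT extended-stats id requested from firmware;
 * column STATS_HOST holds the host statistics id. TXRX_FW_STATS_INVALID and
 * TXRX_HOST_STATS_INVALID mark entries with no counterpart of that type.
 */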
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
    {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
    /* Last ENUM for HTT FW STATS */
    {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
    {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
};

/* MCL specific functions */
#ifdef CONFIG_MCL
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0, since in interrupt mode (softirq based RX)
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings; they are
 * instead processed in a separate timer context.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
    return 0;
}

/*
 * dp_service_mon_rings() - timer to reap monitor rings,
 * required as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_service_mon_rings(void *arg)
{
    struct dp_soc *soc = (struct dp_soc *)arg;
    int ring = 0, work_done, mac_id;
    struct dp_pdev *pdev = NULL;

    for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
        pdev = soc->pdev_list[ring];
        if (!pdev)
            continue;
        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
            int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
                                                      pdev->pdev_id);
            work_done = dp_mon_process(soc, mac_for_pdev,
                                       QCA_NAPI_BUDGET);

            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                      FL("Reaped %d descs from Monitor rings"),
                      work_done);
        }
    }

    qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @ppdev: physical device handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
    struct dp_pdev *handle = (struct dp_pdev *)ppdev;

    if (handle->pkt_log_init) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: Packet log not initialized", __func__);
        return;
    }

    pktlog_sethandle(&handle->pl_dev, scn);
    pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);

    if (pktlogmod_init(scn)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: pktlogmod_init failed", __func__);
        handle->pkt_log_init = false;
    } else {
        handle->pkt_log_init = true;
    }
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
    struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

    dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
    pktlog_htc_attach();
}

/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
    int i;
    int num_rx_contexts = 0;
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;

    for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
        if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
            num_rx_contexts++;

    return num_rx_contexts;
}

/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
    void *scn = (void *)handle->soc->hif_handle;

    if (!scn) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: Invalid hif(scn) handle", __func__);
        return;
    }

    pktlogmod_exit(scn);
    handle->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
    return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#endif

/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * Return: pointer to dp_vdev
 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
    return (struct dp_vdev *)cdp_opaque_vdev;
}
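/*
 * dp_peer_add_ast_wifi3() - cdp wrapper to add an AST entry for a peer
 * @soc_hdl: Datapath SOC handle
 * @peer_hdl: peer handle
 * @mac_addr: MAC address for the new AST entry
 * @type: AST entry type
 * @flags: AST entry flags
 *
 * Return: status of dp_peer_add_ast()
 */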
static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                 struct cdp_peer *peer_hdl,
                                 uint8_t *mac_addr,
                                 enum cdp_txrx_ast_entry_type type,
                                 uint32_t flags)
{
    return dp_peer_add_ast((struct dp_soc *)soc_hdl,
                           (struct dp_peer *)peer_hdl,
                           mac_addr,
                           type,
                           flags);
}
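/*
 * dp_peer_del_ast_wifi3() - cdp wrapper to delete an AST entry
 * @soc_hdl: Datapath SOC handle
 * @ast_entry_hdl: AST entry handle
 *
 * Deletes the entry via dp_peer_del_ast() under the SOC AST lock.
 *
 * Return: None
 */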
static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                  void *ast_entry_hdl)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;

    qdf_spin_lock_bh(&soc->ast_lock);
    dp_peer_del_ast((struct dp_soc *)soc_hdl,
                    (struct dp_ast_entry *)ast_entry_hdl);
    qdf_spin_unlock_bh(&soc->ast_lock);
}
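/*
 * dp_peer_update_ast_wifi3() - cdp wrapper to update an existing AST entry
 * @soc_hdl: Datapath SOC handle
 * @peer_hdl: peer handle
 * @wds_macaddr: MAC address of the AST entry to update
 * @flags: new AST entry flags
 *
 * Looks up the entry by MAC address and pdev id under the SOC AST lock and
 * updates it via dp_peer_update_ast().
 *
 * Return: update status; -1 if no matching AST entry is found
 */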
  490. static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
  491. struct cdp_peer *peer_hdl,
  492. uint8_t *wds_macaddr,
  493. uint32_t flags)
  494. {
  495. int status = -1;
  496. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  497. struct dp_ast_entry *ast_entry = NULL;
  498. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  499. qdf_spin_lock_bh(&soc->ast_lock);
  500. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
  501. peer->vdev->pdev->pdev_id);
  502. if (ast_entry) {
  503. status = dp_peer_update_ast(soc,
  504. peer,
  505. ast_entry, flags);
  506. }
  507. qdf_spin_unlock_bh(&soc->ast_lock);
  508. return status;
  509. }
  510. /*
511. * dp_wds_reset_ast_wifi3() - Reset the is_active flag of a WDS AST entry
512. * @soc_hdl: Datapath SOC handle
  513. * @wds_macaddr: WDS entry MAC Address
  514. * Return: None
  515. */
  516. static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
  517. uint8_t *wds_macaddr, void *vdev_handle)
  518. {
  519. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  520. struct dp_ast_entry *ast_entry = NULL;
  521. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  522. qdf_spin_lock_bh(&soc->ast_lock);
  523. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
  524. vdev->pdev->pdev_id);
  525. if (ast_entry) {
  526. if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
  527. (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
  528. (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
  529. ast_entry->is_active = TRUE;
  530. }
  531. }
  532. qdf_spin_unlock_bh(&soc->ast_lock);
  533. }
  534. /*
535. * dp_wds_reset_ast_table_wifi3() - Reset the is_active flag for all AST entries
536. * @soc_hdl: Datapath SOC handle
537. * @vdev_hdl: vdev handle (unused by this function)
  538. * Return: None
  539. */
  540. static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
  541. void *vdev_hdl)
  542. {
  543. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  544. struct dp_pdev *pdev;
  545. struct dp_vdev *vdev;
  546. struct dp_peer *peer;
  547. struct dp_ast_entry *ase, *temp_ase;
  548. int i;
  549. qdf_spin_lock_bh(&soc->ast_lock);
  550. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  551. pdev = soc->pdev_list[i];
  552. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  553. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  554. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  555. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  556. if ((ase->type ==
  557. CDP_TXRX_AST_TYPE_STATIC) ||
  558. (ase->type ==
  559. CDP_TXRX_AST_TYPE_SELF) ||
  560. (ase->type ==
  561. CDP_TXRX_AST_TYPE_STA_BSS))
  562. continue;
  563. ase->is_active = TRUE;
  564. }
  565. }
  566. }
  567. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  568. }
  569. qdf_spin_unlock_bh(&soc->ast_lock);
  570. }
  571. /*
572. * dp_wds_flush_ast_table_wifi3() - Delete all WDS and HMWDS AST entries
573. * @soc_hdl: Datapath SOC handle
  574. *
  575. * Return: None
  576. */
  577. static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
  578. {
  579. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  580. struct dp_pdev *pdev;
  581. struct dp_vdev *vdev;
  582. struct dp_peer *peer;
  583. struct dp_ast_entry *ase, *temp_ase;
  584. int i;
  585. qdf_spin_lock_bh(&soc->ast_lock);
  586. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  587. pdev = soc->pdev_list[i];
  588. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  589. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  590. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  591. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  592. if ((ase->type ==
  593. CDP_TXRX_AST_TYPE_STATIC) ||
  594. (ase->type ==
  595. CDP_TXRX_AST_TYPE_SELF) ||
  596. (ase->type ==
  597. CDP_TXRX_AST_TYPE_STA_BSS))
  598. continue;
  599. dp_peer_del_ast(soc, ase);
  600. }
  601. }
  602. }
  603. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  604. }
  605. qdf_spin_unlock_bh(&soc->ast_lock);
  606. }
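/*
 * dp_peer_ast_hash_find_soc_wifi3() - look up an AST entry by MAC address
 * @soc_hdl: opaque cdp soc handle
 * @ast_mac_addr: MAC address to search for across the soc AST hash table
 *
 * The lookup is performed with the soc AST lock held.
 *
 * Return: opaque pointer to the matching dp_ast_entry, NULL if none is found
 */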
  607. static void *dp_peer_ast_hash_find_soc_wifi3(struct cdp_soc_t *soc_hdl,
  608. uint8_t *ast_mac_addr)
  609. {
  610. struct dp_ast_entry *ast_entry;
  611. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  612. qdf_spin_lock_bh(&soc->ast_lock);
  613. ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
  614. qdf_spin_unlock_bh(&soc->ast_lock);
  615. return (void *)ast_entry;
  616. }
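/*
 * dp_peer_ast_hash_find_by_pdevid_wifi3() - look up an AST entry on a pdev
 * @soc_hdl: opaque cdp soc handle
 * @ast_mac_addr: MAC address to search for
 * @pdev_id: physical device id to restrict the search to
 *
 * The lookup is performed with the soc AST lock held.
 *
 * Return: opaque pointer to the matching dp_ast_entry, NULL if none is found
 */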
  617. static void *dp_peer_ast_hash_find_by_pdevid_wifi3(struct cdp_soc_t *soc_hdl,
  618. uint8_t *ast_mac_addr,
  619. uint8_t pdev_id)
  620. {
  621. struct dp_ast_entry *ast_entry;
  622. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  623. qdf_spin_lock_bh(&soc->ast_lock);
  624. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
  625. qdf_spin_unlock_bh(&soc->ast_lock);
  626. return (void *)ast_entry;
  627. }
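/*
 * dp_peer_ast_get_pdev_id_wifi3() - get the pdev id of an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: pdev id reported by dp_peer_ast_get_pdev_id()
 */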
  628. static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
  629. void *ast_entry_hdl)
  630. {
  631. return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
  632. (struct dp_ast_entry *)ast_entry_hdl);
  633. }
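/*
 * dp_peer_ast_get_next_hop_wifi3() - get the next_hop flag of an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: next_hop value reported by dp_peer_ast_get_next_hop()
 */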
  634. static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
  635. void *ast_entry_hdl)
  636. {
  637. return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
  638. (struct dp_ast_entry *)ast_entry_hdl);
  639. }
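/*
 * dp_peer_ast_set_type_wifi3() - set the type of an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 * @type: cdp_txrx_ast_entry_type to assign to the entry
 *
 * Return: None
 */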
  640. static void dp_peer_ast_set_type_wifi3(
  641. struct cdp_soc_t *soc_hdl,
  642. void *ast_entry_hdl,
  643. enum cdp_txrx_ast_entry_type type)
  644. {
  645. dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
  646. (struct dp_ast_entry *)ast_entry_hdl,
  647. type);
  648. }
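/*
 * dp_peer_ast_get_type_wifi3() - get the type of an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: cdp_txrx_ast_entry_type stored in the entry
 */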
  649. static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
  650. struct cdp_soc_t *soc_hdl,
  651. void *ast_entry_hdl)
  652. {
  653. return ((struct dp_ast_entry *)ast_entry_hdl)->type;
  654. }
  655. #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
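/*
 * dp_peer_ast_set_cp_ctx_wifi3() - attach a control-path context to an AST entry
 * @soc_handle: opaque cdp soc handle
 * @ast_entry: opaque AST entry handle
 * @cp_ctx: control-path context pointer to store in the entry
 *
 * Calls dp_peer_ast_set_cp_ctx() with the soc AST lock held.
 *
 * Return: None
 */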
  656. void dp_peer_ast_set_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
  657. void *ast_entry,
  658. void *cp_ctx)
  659. {
  660. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  661. qdf_spin_lock_bh(&soc->ast_lock);
  662. dp_peer_ast_set_cp_ctx(soc,
  663. (struct dp_ast_entry *)ast_entry, cp_ctx);
  664. qdf_spin_unlock_bh(&soc->ast_lock);
  665. }
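/*
 * dp_peer_ast_get_cp_ctx_wifi3() - get the control-path context of an AST entry
 * @soc_handle: opaque cdp soc handle
 * @ast_entry: opaque AST entry handle
 *
 * Calls dp_peer_ast_get_cp_ctx() with the soc AST lock held.
 *
 * Return: control-path context pointer stored in the entry
 */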
  666. void *dp_peer_ast_get_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
  667. void *ast_entry)
  668. {
  669. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  670. void *cp_ctx = NULL;
  671. qdf_spin_lock_bh(&soc->ast_lock);
  672. cp_ctx = dp_peer_ast_get_cp_ctx(soc,
  673. (struct dp_ast_entry *)ast_entry);
  674. qdf_spin_unlock_bh(&soc->ast_lock);
  675. return cp_ctx;
  676. }
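/*
 * dp_peer_ast_get_wmi_sent_wifi3() - check if a delete command was sent for an
 * AST entry
 * @soc_handle: opaque cdp soc handle
 * @ast_entry: opaque AST entry handle
 *
 * Calls dp_peer_ast_get_del_cmd_sent() with the soc AST lock held.
 *
 * Return: true if the delete command was already sent for this entry
 */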
  677. bool dp_peer_ast_get_wmi_sent_wifi3(struct cdp_soc_t *soc_handle,
  678. void *ast_entry)
  679. {
  680. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  681. bool wmi_sent = false;
  682. qdf_spin_lock_bh(&soc->ast_lock);
  683. wmi_sent = dp_peer_ast_get_del_cmd_sent(soc,
  684. (struct dp_ast_entry *)
  685. ast_entry);
  686. qdf_spin_unlock_bh(&soc->ast_lock);
  687. return wmi_sent;
  688. }
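/*
 * dp_peer_ast_free_entry_wifi3() - free an AST entry
 * @soc_handle: opaque cdp soc handle
 * @ast_entry: opaque AST entry handle
 *
 * Calls dp_peer_ast_free_entry() with the soc AST lock held.
 *
 * Return: None
 */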
  689. void dp_peer_ast_free_entry_wifi3(struct cdp_soc_t *soc_handle,
  690. void *ast_entry)
  691. {
  692. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  693. qdf_spin_lock_bh(&soc->ast_lock);
  694. dp_peer_ast_free_entry(soc, (struct dp_ast_entry *)ast_entry);
  695. qdf_spin_unlock_bh(&soc->ast_lock);
  696. }
  697. #endif
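/*
 * dp_peer_ast_get_peer_wifi3() - get the peer that owns an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: opaque cdp peer handle of the owning peer
 */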
  698. static struct cdp_peer *dp_peer_ast_get_peer_wifi3(
  699. struct cdp_soc_t *soc_hdl,
  700. void *ast_entry_hdl)
  701. {
  702. return (struct cdp_peer *)((struct dp_ast_entry *)ast_entry_hdl)->peer;
  703. }
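/*
 * dp_peer_ast_get_nexhop_peer_id_wifi3() - get the peer id of the peer that
 * owns an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: first peer id (peer_ids[0]) of the owning peer
 */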
  704. static uint32_t dp_peer_ast_get_nexhop_peer_id_wifi3(
  705. struct cdp_soc_t *soc_hdl,
  706. void *ast_entry_hdl)
  707. {
  708. return ((struct dp_ast_entry *)ast_entry_hdl)->peer->peer_ids[0];
  709. }
  710. /**
711. * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
712. * @ring_num: ring number of the ring being queried
  713. * @grp_mask: the grp_mask array for the ring type in question.
  714. *
  715. * The grp_mask array is indexed by group number and the bit fields correspond
  716. * to ring numbers. We are finding which interrupt group a ring belongs to.
  717. *
718. * Return: the index in the grp_mask array whose mask includes the ring number;
  719. * -QDF_STATUS_E_NOENT if no entry is found
  720. */
  721. static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
  722. {
  723. int ext_group_num;
  724. int mask = 1 << ring_num;
  725. for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
  726. ext_group_num++) {
  727. if (mask & grp_mask[ext_group_num])
  728. return ext_group_num;
  729. }
  730. return -QDF_STATUS_E_NOENT;
  731. }
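/*
 * dp_srng_calculate_msi_group() - find the interrupt group servicing a ring
 * @soc: DP soc handle
 * @ring_type: hal ring type of the ring being queried
 * @ring_num: ring number within that type
 *
 * Selects the interrupt group mask array that corresponds to @ring_type and
 * then uses dp_srng_find_ring_in_mask() to locate the group servicing
 * @ring_num. Rings that are not serviced through an ext interrupt group
 * (SW-to-HW rings, CE rings, etc.) are reported as not found.
 *
 * Return: interrupt group index, or -QDF_STATUS_E_NOENT if the ring is not
 * part of any group
 */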
  732. static int dp_srng_calculate_msi_group(struct dp_soc *soc,
  733. enum hal_ring_type ring_type,
  734. int ring_num)
  735. {
  736. int *grp_mask;
  737. switch (ring_type) {
  738. case WBM2SW_RELEASE:
  739. /* dp_tx_comp_handler - soc->tx_comp_ring */
  740. if (ring_num < 3)
  741. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  742. /* dp_rx_wbm_err_process - soc->rx_rel_ring */
  743. else if (ring_num == 3) {
  744. /* sw treats this as a separate ring type */
  745. grp_mask = &soc->wlan_cfg_ctx->
  746. int_rx_wbm_rel_ring_mask[0];
  747. ring_num = 0;
  748. } else {
  749. qdf_assert(0);
  750. return -QDF_STATUS_E_NOENT;
  751. }
  752. break;
  753. case REO_EXCEPTION:
  754. /* dp_rx_err_process - &soc->reo_exception_ring */
  755. grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
  756. break;
  757. case REO_DST:
  758. /* dp_rx_process - soc->reo_dest_ring */
  759. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  760. break;
  761. case REO_STATUS:
  762. /* dp_reo_status_ring_handler - soc->reo_status_ring */
  763. grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
  764. break;
  765. /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
  766. case RXDMA_MONITOR_STATUS:
  767. /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
  768. case RXDMA_MONITOR_DST:
  769. /* dp_mon_process */
  770. grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
  771. break;
  772. case RXDMA_DST:
  773. /* dp_rxdma_err_process */
  774. grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
  775. break;
  776. case RXDMA_BUF:
  777. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  778. break;
  779. case RXDMA_MONITOR_BUF:
  780. /* TODO: support low_thresh interrupt */
  781. return -QDF_STATUS_E_NOENT;
  782. break;
  783. case TCL_DATA:
  784. case TCL_CMD:
  785. case REO_CMD:
  786. case SW2WBM_RELEASE:
  787. case WBM_IDLE_LINK:
  788. /* normally empty SW_TO_HW rings */
  789. return -QDF_STATUS_E_NOENT;
  790. break;
  791. case TCL_STATUS:
  792. case REO_REINJECT:
  793. /* misc unused rings */
  794. return -QDF_STATUS_E_NOENT;
  795. break;
  796. case CE_SRC:
  797. case CE_DST:
  798. case CE_DST_STATUS:
  799. /* CE_rings - currently handled by hif */
  800. default:
  801. return -QDF_STATUS_E_NOENT;
  802. break;
  803. }
  804. return dp_srng_find_ring_in_mask(ring_num, grp_mask);
  805. }
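/*
 * dp_srng_msi_setup() - fill MSI parameters for an SRNG
 * @soc: DP soc handle
 * @ring_params: hal_srng_params to be populated with MSI address/data
 * @ring_type: type of the ring being set up
 * @ring_num: ring number within that type
 *
 * Queries the platform for the MSI assignment of the "DP" block. If no MSI
 * assignment exists the function returns without touching @ring_params; if
 * the ring is not part of any interrupt group the MSI address/data are set
 * to zero. Otherwise the MSI address and data are programmed into
 * @ring_params and HAL_SRNG_MSI_INTR is set.
 *
 * Return: None
 */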
  806. static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
  807. *ring_params, int ring_type, int ring_num)
  808. {
  809. int msi_group_number;
  810. int msi_data_count;
  811. int ret;
  812. uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
  813. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  814. &msi_data_count, &msi_data_start,
  815. &msi_irq_start);
  816. if (ret)
  817. return;
  818. msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
  819. ring_num);
  820. if (msi_group_number < 0) {
  821. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  822. FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
  823. ring_type, ring_num);
  824. ring_params->msi_addr = 0;
  825. ring_params->msi_data = 0;
  826. return;
  827. }
  828. if (msi_group_number > msi_data_count) {
  829. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  830. FL("2 msi_groups will share an msi; msi_group_num %d"),
  831. msi_group_number);
  832. QDF_ASSERT(0);
  833. }
  834. pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
  835. ring_params->msi_addr = addr_low;
  836. ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
  837. ring_params->msi_data = (msi_group_number % msi_data_count)
  838. + msi_data_start;
  839. ring_params->flags |= HAL_SRNG_MSI_INTR;
  840. }
  841. /**
  842. * dp_print_ast_stats() - Dump AST table contents
  843. * @soc: Datapath soc handle
  844. *
845. * Return: void
  846. */
  847. #ifdef FEATURE_AST
  848. static void dp_print_ast_stats(struct dp_soc *soc)
  849. {
  850. uint8_t i;
  851. uint8_t num_entries = 0;
  852. struct dp_vdev *vdev;
  853. struct dp_pdev *pdev;
  854. struct dp_peer *peer;
  855. struct dp_ast_entry *ase, *tmp_ase;
  856. char type[CDP_TXRX_AST_TYPE_MAX][10] = {
  857. "NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
  858. "DA", "HMWDS_SEC"};
  859. DP_PRINT_STATS("AST Stats:");
  860. DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
  861. DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
  862. DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
  863. DP_PRINT_STATS("AST Table:");
  864. qdf_spin_lock_bh(&soc->ast_lock);
  865. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  866. pdev = soc->pdev_list[i];
  867. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  868. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  869. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  870. DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
  871. DP_PRINT_STATS("%6d mac_addr = %pM"
  872. " peer_mac_addr = %pM"
  873. " type = %s"
  874. " next_hop = %d"
  875. " is_active = %d"
  876. " is_bss = %d"
  877. " ast_idx = %d"
  878. " ast_hash = %d"
  879. " pdev_id = %d"
  880. " vdev_id = %d"
  881. " del_cmd_sent = %d",
  882. ++num_entries,
  883. ase->mac_addr.raw,
  884. ase->peer->mac_addr.raw,
  885. type[ase->type],
  886. ase->next_hop,
  887. ase->is_active,
  888. ase->is_bss,
  889. ase->ast_idx,
  890. ase->ast_hash_value,
  891. ase->pdev_id,
  892. ase->vdev_id,
  893. ase->del_cmd_sent);
  894. }
  895. }
  896. }
  897. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  898. }
  899. qdf_spin_unlock_bh(&soc->ast_lock);
  900. }
  901. #else
  902. static void dp_print_ast_stats(struct dp_soc *soc)
  903. {
  904. DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
  905. return;
  906. }
  907. #endif
  908. /**
  909. * dp_print_peer_table() - Dump all Peer stats
  910. * @vdev: Datapath Vdev handle
  911. *
912. * Return: void
  913. */
  914. static void dp_print_peer_table(struct dp_vdev *vdev)
  915. {
  916. struct dp_peer *peer = NULL;
  917. DP_PRINT_STATS("Dumping Peer Table Stats:");
  918. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  919. if (!peer) {
  920. DP_PRINT_STATS("Invalid Peer");
  921. return;
  922. }
  923. DP_PRINT_STATS(" peer_mac_addr = %pM nawds_enabled = %d",
  924. peer->mac_addr.raw,
  925. peer->nawds_enabled);
  926. DP_PRINT_STATS(" bss_peer = %d wapi = %d wds_enabled = %d",
  927. peer->bss_peer,
  928. peer->wapi,
  929. peer->wds_enabled);
  930. DP_PRINT_STATS(" delete in progress = %d peer id = %d",
  931. peer->delete_in_progress,
  932. peer->peer_ids[0]);
  933. }
  934. }
  935. /*
936. * dp_srng_setup() - Internal function to setup SRNG rings used by data path
  937. */
  938. static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
  939. int ring_type, int ring_num, int mac_id, uint32_t num_entries)
  940. {
  941. void *hal_soc = soc->hal_soc;
  942. uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
  943. /* TODO: See if we should get align size from hal */
  944. uint32_t ring_base_align = 8;
  945. struct hal_srng_params ring_params;
  946. uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
  947. /* TODO: Currently hal layer takes care of endianness related settings.
  948. * See if these settings need to passed from DP layer
  949. */
  950. ring_params.flags = 0;
  951. num_entries = (num_entries > max_entries) ? max_entries : num_entries;
  952. srng->hal_srng = NULL;
  953. srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
  954. srng->num_entries = num_entries;
  955. if (!soc->dp_soc_reinit) {
  956. srng->base_vaddr_unaligned =
  957. qdf_mem_alloc_consistent(soc->osdev,
  958. soc->osdev->dev,
  959. srng->alloc_size,
  960. &srng->base_paddr_unaligned);
  961. }
  962. if (!srng->base_vaddr_unaligned) {
  963. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  964. FL("alloc failed - ring_type: %d, ring_num %d"),
  965. ring_type, ring_num);
  966. return QDF_STATUS_E_NOMEM;
  967. }
  968. ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
  969. ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
  970. ring_params.ring_base_paddr = srng->base_paddr_unaligned +
  971. ((unsigned long)(ring_params.ring_base_vaddr) -
  972. (unsigned long)srng->base_vaddr_unaligned);
  973. ring_params.num_entries = num_entries;
  974. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  975. FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
  976. ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
  977. (void *)ring_params.ring_base_paddr, ring_params.num_entries);
  978. if (soc->intr_mode == DP_INTR_MSI) {
  979. dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
  980. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  981. FL("Using MSI for ring_type: %d, ring_num %d"),
  982. ring_type, ring_num);
  983. } else {
  984. ring_params.msi_data = 0;
  985. ring_params.msi_addr = 0;
  986. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  987. FL("Skipping MSI for ring_type: %d, ring_num %d"),
  988. ring_type, ring_num);
  989. }
  990. /*
  991. * Setup interrupt timer and batch counter thresholds for
  992. * interrupt mitigation based on ring type
  993. */
  994. if (ring_type == REO_DST) {
  995. ring_params.intr_timer_thres_us =
  996. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  997. ring_params.intr_batch_cntr_thres_entries =
  998. wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
  999. } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
  1000. ring_params.intr_timer_thres_us =
  1001. wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
  1002. ring_params.intr_batch_cntr_thres_entries =
  1003. wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
  1004. } else {
  1005. ring_params.intr_timer_thres_us =
  1006. wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
  1007. ring_params.intr_batch_cntr_thres_entries =
  1008. wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
  1009. }
1010. /* Enable low threshold interrupts for rx buffer rings (regular and
1011. * monitor buffer rings).
  1012. * TODO: See if this is required for any other ring
  1013. */
  1014. if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
  1015. (ring_type == RXDMA_MONITOR_STATUS)) {
  1016. /* TODO: Setting low threshold to 1/8th of ring size
  1017. * see if this needs to be configurable
  1018. */
  1019. ring_params.low_threshold = num_entries >> 3;
  1020. ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
  1021. ring_params.intr_timer_thres_us =
  1022. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  1023. ring_params.intr_batch_cntr_thres_entries = 0;
  1024. }
  1025. srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
  1026. mac_id, &ring_params);
  1027. if (!srng->hal_srng) {
  1028. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1029. srng->alloc_size,
  1030. srng->base_vaddr_unaligned,
  1031. srng->base_paddr_unaligned, 0);
  1032. }
  1033. return 0;
  1034. }
  1035. /*
  1036. * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
  1037. * @soc: DP SOC handle
  1038. * @srng: source ring structure
  1039. * @ring_type: type of ring
  1040. * @ring_num: ring number
  1041. *
  1042. * Return: None
  1043. */
  1044. static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
  1045. int ring_type, int ring_num)
  1046. {
  1047. }
  1048. /**
  1049. * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
  1050. * Any buffers allocated and attached to ring entries are expected to be freed
  1051. * before calling this function.
  1052. */
  1053. static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
  1054. int ring_type, int ring_num)
  1055. {
  1056. if (!srng->hal_srng) {
  1057. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1058. FL("Ring type: %d, num:%d not setup"),
  1059. ring_type, ring_num);
  1060. return;
  1061. }
  1062. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  1063. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1064. srng->alloc_size,
  1065. srng->base_vaddr_unaligned,
  1066. srng->base_paddr_unaligned, 0);
  1067. srng->hal_srng = NULL;
  1068. }
  1069. /* TODO: Need this interface from HIF */
  1070. void *hif_get_hal_handle(void *hif_handle);
  1071. /*
  1072. * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1073. * @dp_ctx: DP interrupt context (struct dp_intr) for this handler
1074. * @dp_budget: Number of frames/descriptors that can be processed in one shot
  1075. *
  1076. * Return: remaining budget/quota for the soc device
  1077. */
  1078. static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
  1079. {
  1080. struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
  1081. struct dp_soc *soc = int_ctx->soc;
  1082. int ring = 0;
  1083. uint32_t work_done = 0;
  1084. int budget = dp_budget;
  1085. uint8_t tx_mask = int_ctx->tx_ring_mask;
  1086. uint8_t rx_mask = int_ctx->rx_ring_mask;
  1087. uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
  1088. uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
  1089. uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
  1090. uint32_t remaining_quota = dp_budget;
  1091. struct dp_pdev *pdev = NULL;
  1092. int mac_id;
  1093. /* Process Tx completion interrupts first to return back buffers */
  1094. while (tx_mask) {
  1095. if (tx_mask & 0x1) {
  1096. work_done = dp_tx_comp_handler(soc,
  1097. soc->tx_comp_ring[ring].hal_srng,
  1098. remaining_quota);
  1099. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1100. "tx mask 0x%x ring %d, budget %d, work_done %d",
  1101. tx_mask, ring, budget, work_done);
  1102. budget -= work_done;
  1103. if (budget <= 0)
  1104. goto budget_done;
  1105. remaining_quota = budget;
  1106. }
  1107. tx_mask = tx_mask >> 1;
  1108. ring++;
  1109. }
  1110. /* Process REO Exception ring interrupt */
  1111. if (rx_err_mask) {
  1112. work_done = dp_rx_err_process(soc,
  1113. soc->reo_exception_ring.hal_srng,
  1114. remaining_quota);
  1115. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1116. "REO Exception Ring: work_done %d budget %d",
  1117. work_done, budget);
  1118. budget -= work_done;
  1119. if (budget <= 0) {
  1120. goto budget_done;
  1121. }
  1122. remaining_quota = budget;
  1123. }
  1124. /* Process Rx WBM release ring interrupt */
  1125. if (rx_wbm_rel_mask) {
  1126. work_done = dp_rx_wbm_err_process(soc,
  1127. soc->rx_rel_ring.hal_srng, remaining_quota);
  1128. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1129. "WBM Release Ring: work_done %d budget %d",
  1130. work_done, budget);
  1131. budget -= work_done;
  1132. if (budget <= 0) {
  1133. goto budget_done;
  1134. }
  1135. remaining_quota = budget;
  1136. }
  1137. /* Process Rx interrupts */
  1138. if (rx_mask) {
  1139. for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
  1140. if (rx_mask & (1 << ring)) {
  1141. work_done = dp_rx_process(int_ctx,
  1142. soc->reo_dest_ring[ring].hal_srng,
  1143. ring,
  1144. remaining_quota);
  1145. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1146. "rx mask 0x%x ring %d, work_done %d budget %d",
  1147. rx_mask, ring, work_done, budget);
  1148. budget -= work_done;
  1149. if (budget <= 0)
  1150. goto budget_done;
  1151. remaining_quota = budget;
  1152. }
  1153. }
  1154. }
  1155. if (reo_status_mask)
  1156. dp_reo_status_ring_handler(soc);
  1157. /* Process LMAC interrupts */
  1158. for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
  1159. pdev = soc->pdev_list[ring];
  1160. if (pdev == NULL)
  1161. continue;
  1162. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  1163. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  1164. pdev->pdev_id);
  1165. if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
  1166. work_done = dp_mon_process(soc, mac_for_pdev,
  1167. remaining_quota);
  1168. budget -= work_done;
  1169. if (budget <= 0)
  1170. goto budget_done;
  1171. remaining_quota = budget;
  1172. }
  1173. if (int_ctx->rxdma2host_ring_mask &
  1174. (1 << mac_for_pdev)) {
  1175. work_done = dp_rxdma_err_process(soc,
  1176. mac_for_pdev,
  1177. remaining_quota);
  1178. budget -= work_done;
  1179. if (budget <= 0)
  1180. goto budget_done;
  1181. remaining_quota = budget;
  1182. }
  1183. if (int_ctx->host2rxdma_ring_mask &
  1184. (1 << mac_for_pdev)) {
  1185. union dp_rx_desc_list_elem_t *desc_list = NULL;
  1186. union dp_rx_desc_list_elem_t *tail = NULL;
  1187. struct dp_srng *rx_refill_buf_ring =
  1188. &pdev->rx_refill_buf_ring;
  1189. DP_STATS_INC(pdev, replenish.low_thresh_intrs,
  1190. 1);
  1191. dp_rx_buffers_replenish(soc, mac_for_pdev,
  1192. rx_refill_buf_ring,
  1193. &soc->rx_desc_buf[mac_for_pdev], 0,
  1194. &desc_list, &tail);
  1195. }
  1196. }
  1197. }
  1198. qdf_lro_flush(int_ctx->lro_ctx);
  1199. budget_done:
  1200. return dp_budget - budget;
  1201. }
1202. /* dp_interrupt_timer() - timer poll for interrupts
  1203. *
  1204. * @arg: SoC Handle
  1205. *
1206. * Return: None
  1207. *
  1208. */
  1209. static void dp_interrupt_timer(void *arg)
  1210. {
  1211. struct dp_soc *soc = (struct dp_soc *) arg;
  1212. int i;
  1213. if (qdf_atomic_read(&soc->cmn_init_done)) {
  1214. for (i = 0;
  1215. i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
  1216. dp_service_srngs(&soc->intr_ctx[i], 0xffff);
  1217. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  1218. }
  1219. }
  1220. /*
  1221. * dp_soc_attach_poll() - Register handlers for DP interrupts
  1222. * @txrx_soc: DP SOC handle
  1223. *
1224. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1225. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
  1226. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  1227. *
  1228. * Return: 0 for success, nonzero for failure.
  1229. */
  1230. static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
  1231. {
  1232. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1233. int i;
  1234. soc->intr_mode = DP_INTR_POLL;
  1235. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1236. soc->intr_ctx[i].dp_intr_id = i;
  1237. soc->intr_ctx[i].tx_ring_mask =
  1238. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1239. soc->intr_ctx[i].rx_ring_mask =
  1240. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1241. soc->intr_ctx[i].rx_mon_ring_mask =
  1242. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
  1243. soc->intr_ctx[i].rx_err_ring_mask =
  1244. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1245. soc->intr_ctx[i].rx_wbm_rel_ring_mask =
  1246. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1247. soc->intr_ctx[i].reo_status_ring_mask =
  1248. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1249. soc->intr_ctx[i].rxdma2host_ring_mask =
  1250. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1251. soc->intr_ctx[i].soc = soc;
  1252. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1253. }
  1254. qdf_timer_init(soc->osdev, &soc->int_timer,
  1255. dp_interrupt_timer, (void *)soc,
  1256. QDF_TIMER_TYPE_WAKE_APPS);
  1257. return QDF_STATUS_SUCCESS;
  1258. }
  1259. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
  1260. #if defined(CONFIG_MCL)
  1261. /*
  1262. * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
  1263. * @txrx_soc: DP SOC handle
  1264. *
  1265. * Call the appropriate attach function based on the mode of operation.
  1266. * This is a WAR for enabling monitor mode.
  1267. *
  1268. * Return: 0 for success. nonzero for failure.
  1269. */
  1270. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1271. {
  1272. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1273. if (!(soc->wlan_cfg_ctx->napi_enabled) ||
  1274. con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
  1275. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1276. "%s: Poll mode", __func__);
  1277. return dp_soc_attach_poll(txrx_soc);
  1278. } else {
  1279. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1280. "%s: Interrupt mode", __func__);
  1281. return dp_soc_interrupt_attach(txrx_soc);
  1282. }
  1283. }
  1284. #else
  1285. #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
  1286. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1287. {
  1288. return dp_soc_attach_poll(txrx_soc);
  1289. }
  1290. #else
  1291. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1292. {
  1293. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1294. if (hif_is_polled_mode_enabled(soc->hif_handle))
  1295. return dp_soc_attach_poll(txrx_soc);
  1296. else
  1297. return dp_soc_interrupt_attach(txrx_soc);
  1298. }
  1299. #endif
  1300. #endif
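/*
 * dp_soc_interrupt_map_calculate_integrated() - build the IRQ map for
 * integrated (non-MSI) interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: interrupt context number whose map is being built
 * @irq_id_map: output array of HW interrupt ids serviced by this context
 * @num_irq_r: output count of entries written to @irq_id_map
 *
 * Walks every ring mask configured for this interrupt context and appends the
 * corresponding legacy interrupt id for each ring bit that is set.
 *
 * Return: None
 */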
  1301. static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
  1302. int intr_ctx_num, int *irq_id_map, int *num_irq_r)
  1303. {
  1304. int j;
  1305. int num_irq = 0;
  1306. int tx_mask =
  1307. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1308. int rx_mask =
  1309. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1310. int rx_mon_mask =
  1311. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1312. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1313. soc->wlan_cfg_ctx, intr_ctx_num);
  1314. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1315. soc->wlan_cfg_ctx, intr_ctx_num);
  1316. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1317. soc->wlan_cfg_ctx, intr_ctx_num);
  1318. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1319. soc->wlan_cfg_ctx, intr_ctx_num);
  1320. int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
  1321. soc->wlan_cfg_ctx, intr_ctx_num);
  1322. int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
  1323. soc->wlan_cfg_ctx, intr_ctx_num);
  1324. for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
  1325. if (tx_mask & (1 << j)) {
  1326. irq_id_map[num_irq++] =
  1327. (wbm2host_tx_completions_ring1 - j);
  1328. }
  1329. if (rx_mask & (1 << j)) {
  1330. irq_id_map[num_irq++] =
  1331. (reo2host_destination_ring1 - j);
  1332. }
  1333. if (rxdma2host_ring_mask & (1 << j)) {
  1334. irq_id_map[num_irq++] =
  1335. rxdma2host_destination_ring_mac1 -
  1336. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1337. }
  1338. if (host2rxdma_ring_mask & (1 << j)) {
  1339. irq_id_map[num_irq++] =
  1340. host2rxdma_host_buf_ring_mac1 -
  1341. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1342. }
  1343. if (host2rxdma_mon_ring_mask & (1 << j)) {
  1344. irq_id_map[num_irq++] =
  1345. host2rxdma_monitor_ring1 -
  1346. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1347. }
  1348. if (rx_mon_mask & (1 << j)) {
  1349. irq_id_map[num_irq++] =
  1350. ppdu_end_interrupts_mac1 -
  1351. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1352. irq_id_map[num_irq++] =
  1353. rxdma2host_monitor_status_ring_mac1 -
  1354. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1355. }
  1356. if (rx_wbm_rel_ring_mask & (1 << j))
  1357. irq_id_map[num_irq++] = wbm2host_rx_release;
  1358. if (rx_err_ring_mask & (1 << j))
  1359. irq_id_map[num_irq++] = reo2host_exception;
  1360. if (reo_status_ring_mask & (1 << j))
  1361. irq_id_map[num_irq++] = reo2host_status;
  1362. }
  1363. *num_irq_r = num_irq;
  1364. }
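/*
 * dp_soc_interrupt_map_calculate_msi() - build the IRQ map for MSI interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: interrupt context number whose map is being built
 * @irq_id_map: output array of MSI IRQ numbers for this context
 * @num_irq_r: output count of entries written to @irq_id_map
 * @msi_vector_count: number of MSI vectors assigned to the DP block
 * @msi_vector_start: first MSI vector assigned to the DP block
 *
 * Interrupt contexts are distributed round-robin across the available MSI
 * vectors; a context gets a single MSI IRQ only if at least one of its ring
 * masks is non-zero. Also switches the soc interrupt mode to DP_INTR_MSI.
 *
 * Return: None
 */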
  1365. static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
  1366. int intr_ctx_num, int *irq_id_map, int *num_irq_r,
  1367. int msi_vector_count, int msi_vector_start)
  1368. {
  1369. int tx_mask = wlan_cfg_get_tx_ring_mask(
  1370. soc->wlan_cfg_ctx, intr_ctx_num);
  1371. int rx_mask = wlan_cfg_get_rx_ring_mask(
  1372. soc->wlan_cfg_ctx, intr_ctx_num);
  1373. int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
  1374. soc->wlan_cfg_ctx, intr_ctx_num);
  1375. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1376. soc->wlan_cfg_ctx, intr_ctx_num);
  1377. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1378. soc->wlan_cfg_ctx, intr_ctx_num);
  1379. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1380. soc->wlan_cfg_ctx, intr_ctx_num);
  1381. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1382. soc->wlan_cfg_ctx, intr_ctx_num);
  1383. unsigned int vector =
  1384. (intr_ctx_num % msi_vector_count) + msi_vector_start;
  1385. int num_irq = 0;
  1386. soc->intr_mode = DP_INTR_MSI;
  1387. if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
  1388. rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
  1389. irq_id_map[num_irq++] =
  1390. pld_get_msi_irq(soc->osdev->dev, vector);
  1391. *num_irq_r = num_irq;
  1392. }
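/*
 * dp_soc_interrupt_map_calculate() - build the IRQ map for an interrupt context
 * @soc: DP soc handle
 * @intr_ctx_num: interrupt context number whose map is being built
 * @irq_id_map: output array of IRQ ids
 * @num_irq: output count of entries written to @irq_id_map
 *
 * Uses the MSI scheme when the platform reports an MSI assignment for "DP",
 * otherwise falls back to the integrated (legacy) interrupt map.
 *
 * Return: None
 */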
  1393. static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
  1394. int *irq_id_map, int *num_irq)
  1395. {
  1396. int msi_vector_count, ret;
  1397. uint32_t msi_base_data, msi_vector_start;
  1398. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  1399. &msi_vector_count,
  1400. &msi_base_data,
  1401. &msi_vector_start);
  1402. if (ret)
  1403. return dp_soc_interrupt_map_calculate_integrated(soc,
  1404. intr_ctx_num, irq_id_map, num_irq);
  1405. else
  1406. dp_soc_interrupt_map_calculate_msi(soc,
  1407. intr_ctx_num, irq_id_map, num_irq,
  1408. msi_vector_count, msi_vector_start);
  1409. }
  1410. /*
  1411. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  1412. * @txrx_soc: DP SOC handle
  1413. *
1414. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1415. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
  1416. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  1417. *
  1418. * Return: 0 for success. nonzero for failure.
  1419. */
  1420. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  1421. {
  1422. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1423. int i = 0;
  1424. int num_irq = 0;
  1425. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1426. int ret = 0;
  1427. /* Map of IRQ ids registered with one interrupt context */
  1428. int irq_id_map[HIF_MAX_GRP_IRQ];
  1429. int tx_mask =
  1430. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1431. int rx_mask =
  1432. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1433. int rx_mon_mask =
  1434. dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
  1435. int rx_err_ring_mask =
  1436. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1437. int rx_wbm_rel_ring_mask =
  1438. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1439. int reo_status_ring_mask =
  1440. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1441. int rxdma2host_ring_mask =
  1442. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1443. int host2rxdma_ring_mask =
  1444. wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
  1445. int host2rxdma_mon_ring_mask =
  1446. wlan_cfg_get_host2rxdma_mon_ring_mask(
  1447. soc->wlan_cfg_ctx, i);
  1448. soc->intr_ctx[i].dp_intr_id = i;
  1449. soc->intr_ctx[i].tx_ring_mask = tx_mask;
  1450. soc->intr_ctx[i].rx_ring_mask = rx_mask;
  1451. soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
  1452. soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
  1453. soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
  1454. soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
  1455. soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
  1456. soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
  1457. soc->intr_ctx[i].host2rxdma_mon_ring_mask =
  1458. host2rxdma_mon_ring_mask;
  1459. soc->intr_ctx[i].soc = soc;
  1460. num_irq = 0;
  1461. dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
  1462. &num_irq);
  1463. ret = hif_register_ext_group(soc->hif_handle,
  1464. num_irq, irq_id_map, dp_service_srngs,
  1465. &soc->intr_ctx[i], "dp_intr",
  1466. HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
  1467. if (ret) {
  1468. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1469. FL("failed, ret = %d"), ret);
  1470. return QDF_STATUS_E_FAILURE;
  1471. }
  1472. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1473. }
  1474. hif_configure_ext_group_interrupts(soc->hif_handle);
  1475. return QDF_STATUS_SUCCESS;
  1476. }
  1477. /*
  1478. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  1479. * @txrx_soc: DP SOC handle
  1480. *
  1481. * Return: void
  1482. */
  1483. static void dp_soc_interrupt_detach(void *txrx_soc)
  1484. {
  1485. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1486. int i;
  1487. if (soc->intr_mode == DP_INTR_POLL) {
  1488. qdf_timer_stop(&soc->int_timer);
  1489. qdf_timer_free(&soc->int_timer);
  1490. } else {
  1491. hif_deregister_exec_group(soc->hif_handle, "dp_intr");
  1492. }
  1493. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1494. soc->intr_ctx[i].tx_ring_mask = 0;
  1495. soc->intr_ctx[i].rx_ring_mask = 0;
  1496. soc->intr_ctx[i].rx_mon_ring_mask = 0;
  1497. soc->intr_ctx[i].rx_err_ring_mask = 0;
  1498. soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
  1499. soc->intr_ctx[i].reo_status_ring_mask = 0;
  1500. soc->intr_ctx[i].rxdma2host_ring_mask = 0;
  1501. soc->intr_ctx[i].host2rxdma_ring_mask = 0;
  1502. soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
  1503. qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
  1504. }
  1505. }
  1506. #define AVG_MAX_MPDUS_PER_TID 128
  1507. #define AVG_TIDS_PER_CLIENT 2
  1508. #define AVG_FLOWS_PER_TID 2
  1509. #define AVG_MSDUS_PER_FLOW 128
  1510. #define AVG_MSDUS_PER_MPDU 4
  1511. /*
  1512. * Allocate and setup link descriptor pool that will be used by HW for
  1513. * various link and queue descriptors and managed by WBM
  1514. */
  1515. static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
  1516. {
  1517. int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
  1518. int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
  1519. uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
  1520. uint32_t num_mpdus_per_link_desc =
  1521. hal_num_mpdus_per_link_desc(soc->hal_soc);
  1522. uint32_t num_msdus_per_link_desc =
  1523. hal_num_msdus_per_link_desc(soc->hal_soc);
  1524. uint32_t num_mpdu_links_per_queue_desc =
  1525. hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
  1526. uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
  1527. uint32_t total_link_descs, total_mem_size;
  1528. uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
  1529. uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
  1530. uint32_t num_link_desc_banks;
  1531. uint32_t last_bank_size = 0;
  1532. uint32_t entry_size, num_entries;
  1533. int i;
  1534. uint32_t desc_id = 0;
  1535. qdf_dma_addr_t *baseaddr = NULL;
1536. /* Only Tx queue descriptors are allocated from the common link descriptor
1537. * pool. Rx queue descriptors (REO queue extension descriptors) are not
1538. * included here because they are expected to be allocated contiguously
1539. * with the REO queue descriptors.
  1540. */
  1541. num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1542. AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
  1543. num_mpdu_queue_descs = num_mpdu_link_descs /
  1544. num_mpdu_links_per_queue_desc;
  1545. num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1546. AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
  1547. num_msdus_per_link_desc;
  1548. num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1549. AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
  1550. num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
  1551. num_tx_msdu_link_descs + num_rx_msdu_link_descs;
  1552. /* Round up to power of 2 */
  1553. total_link_descs = 1;
  1554. while (total_link_descs < num_entries)
  1555. total_link_descs <<= 1;
  1556. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1557. FL("total_link_descs: %u, link_desc_size: %d"),
  1558. total_link_descs, link_desc_size);
  1559. total_mem_size = total_link_descs * link_desc_size;
  1560. total_mem_size += link_desc_align;
  1561. if (total_mem_size <= max_alloc_size) {
  1562. num_link_desc_banks = 0;
  1563. last_bank_size = total_mem_size;
  1564. } else {
  1565. num_link_desc_banks = (total_mem_size) /
  1566. (max_alloc_size - link_desc_align);
  1567. last_bank_size = total_mem_size %
  1568. (max_alloc_size - link_desc_align);
  1569. }
  1570. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1571. FL("total_mem_size: %d, num_link_desc_banks: %u"),
  1572. total_mem_size, num_link_desc_banks);
  1573. for (i = 0; i < num_link_desc_banks; i++) {
  1574. if (!soc->dp_soc_reinit) {
  1575. baseaddr = &soc->link_desc_banks[i].
  1576. base_paddr_unaligned;
  1577. soc->link_desc_banks[i].base_vaddr_unaligned =
  1578. qdf_mem_alloc_consistent(soc->osdev,
  1579. soc->osdev->dev,
  1580. max_alloc_size,
  1581. baseaddr);
  1582. }
  1583. soc->link_desc_banks[i].size = max_alloc_size;
  1584. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
  1585. soc->link_desc_banks[i].base_vaddr_unaligned) +
  1586. ((unsigned long)(
  1587. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1588. link_desc_align));
  1589. soc->link_desc_banks[i].base_paddr = (unsigned long)(
  1590. soc->link_desc_banks[i].base_paddr_unaligned) +
  1591. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1592. (unsigned long)(
  1593. soc->link_desc_banks[i].base_vaddr_unaligned));
  1594. if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
  1595. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1596. FL("Link descriptor memory alloc failed"));
  1597. goto fail;
  1598. }
  1599. }
  1600. if (last_bank_size) {
1601. /* Allocate the last bank in case the total memory required is not an
1602. * exact multiple of max_alloc_size
  1603. */
  1604. if (!soc->dp_soc_reinit) {
  1605. baseaddr = &soc->link_desc_banks[i].
  1606. base_paddr_unaligned;
  1607. soc->link_desc_banks[i].base_vaddr_unaligned =
  1608. qdf_mem_alloc_consistent(soc->osdev,
  1609. soc->osdev->dev,
  1610. last_bank_size,
  1611. baseaddr);
  1612. }
  1613. soc->link_desc_banks[i].size = last_bank_size;
  1614. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
  1615. (soc->link_desc_banks[i].base_vaddr_unaligned) +
  1616. ((unsigned long)(
  1617. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1618. link_desc_align));
  1619. soc->link_desc_banks[i].base_paddr =
  1620. (unsigned long)(
  1621. soc->link_desc_banks[i].base_paddr_unaligned) +
  1622. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1623. (unsigned long)(
  1624. soc->link_desc_banks[i].base_vaddr_unaligned));
  1625. }
  1626. /* Allocate and setup link descriptor idle list for HW internal use */
  1627. entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
  1628. total_mem_size = entry_size * total_link_descs;
  1629. if (total_mem_size <= max_alloc_size) {
  1630. void *desc;
  1631. if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
  1632. WBM_IDLE_LINK, 0, 0, total_link_descs)) {
  1633. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1634. FL("Link desc idle ring setup failed"));
  1635. goto fail;
  1636. }
  1637. hal_srng_access_start_unlocked(soc->hal_soc,
  1638. soc->wbm_idle_link_ring.hal_srng);
  1639. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1640. soc->link_desc_banks[i].base_paddr; i++) {
  1641. uint32_t num_entries = (soc->link_desc_banks[i].size -
  1642. ((unsigned long)(
  1643. soc->link_desc_banks[i].base_vaddr) -
  1644. (unsigned long)(
  1645. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1646. / link_desc_size;
  1647. unsigned long paddr = (unsigned long)(
  1648. soc->link_desc_banks[i].base_paddr);
  1649. while (num_entries && (desc = hal_srng_src_get_next(
  1650. soc->hal_soc,
  1651. soc->wbm_idle_link_ring.hal_srng))) {
  1652. hal_set_link_desc_addr(desc,
  1653. LINK_DESC_COOKIE(desc_id, i), paddr);
  1654. num_entries--;
  1655. desc_id++;
  1656. paddr += link_desc_size;
  1657. }
  1658. }
  1659. hal_srng_access_end_unlocked(soc->hal_soc,
  1660. soc->wbm_idle_link_ring.hal_srng);
  1661. } else {
  1662. uint32_t num_scatter_bufs;
  1663. uint32_t num_entries_per_buf;
  1664. uint32_t rem_entries;
  1665. uint8_t *scatter_buf_ptr;
  1666. uint16_t scatter_buf_num;
  1667. uint32_t buf_size = 0;
  1668. soc->wbm_idle_scatter_buf_size =
  1669. hal_idle_list_scatter_buf_size(soc->hal_soc);
  1670. num_entries_per_buf = hal_idle_scatter_buf_num_entries(
  1671. soc->hal_soc, soc->wbm_idle_scatter_buf_size);
  1672. num_scatter_bufs = hal_idle_list_num_scatter_bufs(
  1673. soc->hal_soc, total_mem_size,
  1674. soc->wbm_idle_scatter_buf_size);
  1675. if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
  1676. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1677. FL("scatter bufs size out of bounds"));
  1678. goto fail;
  1679. }
  1680. for (i = 0; i < num_scatter_bufs; i++) {
  1681. baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
  1682. if (!soc->dp_soc_reinit) {
  1683. buf_size = soc->wbm_idle_scatter_buf_size;
  1684. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  1685. qdf_mem_alloc_consistent(soc->osdev,
  1686. soc->osdev->
  1687. dev,
  1688. buf_size,
  1689. baseaddr);
  1690. }
  1691. if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
  1692. QDF_TRACE(QDF_MODULE_ID_DP,
  1693. QDF_TRACE_LEVEL_ERROR,
  1694. FL("Scatter lst memory alloc fail"));
  1695. goto fail;
  1696. }
  1697. }
  1698. /* Populate idle list scatter buffers with link descriptor
  1699. * pointers
  1700. */
  1701. scatter_buf_num = 0;
  1702. scatter_buf_ptr = (uint8_t *)(
  1703. soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
  1704. rem_entries = num_entries_per_buf;
  1705. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1706. soc->link_desc_banks[i].base_paddr; i++) {
  1707. uint32_t num_link_descs =
  1708. (soc->link_desc_banks[i].size -
  1709. ((unsigned long)(
  1710. soc->link_desc_banks[i].base_vaddr) -
  1711. (unsigned long)(
  1712. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1713. / link_desc_size;
  1714. unsigned long paddr = (unsigned long)(
  1715. soc->link_desc_banks[i].base_paddr);
  1716. while (num_link_descs) {
  1717. hal_set_link_desc_addr((void *)scatter_buf_ptr,
  1718. LINK_DESC_COOKIE(desc_id, i), paddr);
  1719. num_link_descs--;
  1720. desc_id++;
  1721. paddr += link_desc_size;
  1722. rem_entries--;
  1723. if (rem_entries) {
  1724. scatter_buf_ptr += entry_size;
  1725. } else {
  1726. rem_entries = num_entries_per_buf;
  1727. scatter_buf_num++;
  1728. if (scatter_buf_num >= num_scatter_bufs)
  1729. break;
  1730. scatter_buf_ptr = (uint8_t *)(
  1731. soc->wbm_idle_scatter_buf_base_vaddr[
  1732. scatter_buf_num]);
  1733. }
  1734. }
  1735. }
  1736. /* Setup link descriptor idle list in HW */
  1737. hal_setup_link_idle_list(soc->hal_soc,
  1738. soc->wbm_idle_scatter_buf_base_paddr,
  1739. soc->wbm_idle_scatter_buf_base_vaddr,
  1740. num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
  1741. (uint32_t)(scatter_buf_ptr -
  1742. (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
  1743. scatter_buf_num-1])), total_link_descs);
  1744. }
  1745. return 0;
  1746. fail:
  1747. if (soc->wbm_idle_link_ring.hal_srng) {
  1748. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1749. WBM_IDLE_LINK, 0);
  1750. }
  1751. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1752. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1753. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1754. soc->wbm_idle_scatter_buf_size,
  1755. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1756. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1757. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  1758. }
  1759. }
  1760. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1761. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1762. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1763. soc->link_desc_banks[i].size,
  1764. soc->link_desc_banks[i].base_vaddr_unaligned,
  1765. soc->link_desc_banks[i].base_paddr_unaligned,
  1766. 0);
  1767. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  1768. }
  1769. }
  1770. return QDF_STATUS_E_FAILURE;
  1771. }
  1772. /*
1773. * Free the link descriptor pool that was set up for use by HW
  1774. */
  1775. static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
  1776. {
  1777. int i;
  1778. if (soc->wbm_idle_link_ring.hal_srng) {
  1779. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1780. WBM_IDLE_LINK, 0);
  1781. }
  1782. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1783. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1784. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1785. soc->wbm_idle_scatter_buf_size,
  1786. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1787. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1788. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  1789. }
  1790. }
  1791. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1792. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1793. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1794. soc->link_desc_banks[i].size,
  1795. soc->link_desc_banks[i].base_vaddr_unaligned,
  1796. soc->link_desc_banks[i].base_paddr_unaligned,
  1797. 0);
  1798. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  1799. }
  1800. }
  1801. }
  1802. #ifdef IPA_OFFLOAD
  1803. #define REO_DST_RING_SIZE_QCA6290 1023
  1804. #ifndef QCA_WIFI_QCA8074_VP
  1805. #define REO_DST_RING_SIZE_QCA8074 1023
  1806. #else
  1807. #define REO_DST_RING_SIZE_QCA8074 8
  1808. #endif /* QCA_WIFI_QCA8074_VP */
  1809. #else
  1810. #define REO_DST_RING_SIZE_QCA6290 1024
  1811. #ifndef QCA_WIFI_QCA8074_VP
  1812. #define REO_DST_RING_SIZE_QCA8074 2048
  1813. #else
  1814. #define REO_DST_RING_SIZE_QCA8074 8
  1815. #endif /* QCA_WIFI_QCA8074_VP */
  1816. #endif /* IPA_OFFLOAD */
  1817. /*
  1818. * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
  1819. * @soc: Datapath SOC handle
  1820. *
  1821. * This is a timer function used to age out stale AST nodes from
1822. * the AST table
  1823. */
  1824. #ifdef FEATURE_WDS
  1825. static void dp_ast_aging_timer_fn(void *soc_hdl)
  1826. {
  1827. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  1828. struct dp_pdev *pdev;
  1829. struct dp_vdev *vdev;
  1830. struct dp_peer *peer;
  1831. struct dp_ast_entry *ase, *temp_ase;
  1832. int i;
  1833. bool check_wds_ase = false;
  1834. if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
  1835. soc->wds_ast_aging_timer_cnt = 0;
  1836. check_wds_ase = true;
  1837. }
  1838. /* Peer list access lock */
  1839. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1840. /* AST list access lock */
  1841. qdf_spin_lock_bh(&soc->ast_lock);
  1842. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  1843. pdev = soc->pdev_list[i];
  1844. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  1845. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  1846. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  1847. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  1848. /*
  1849. * Do not expire static ast entries
  1850. * and HM WDS entries
  1851. */
  1852. if (ase->type !=
  1853. CDP_TXRX_AST_TYPE_WDS &&
  1854. ase->type !=
  1855. CDP_TXRX_AST_TYPE_MEC &&
  1856. ase->type !=
  1857. CDP_TXRX_AST_TYPE_DA)
  1858. continue;
1859. /* Expire MEC entries every n sec.
1860. * A MEC entry must be expired when
1861. * the STA backbone is made the AP
1862. * backbone; in that case it needs
1863. * to be re-added as a WDS entry.
  1864. */
  1865. if (ase->is_active && ase->type ==
  1866. CDP_TXRX_AST_TYPE_MEC) {
  1867. ase->is_active = FALSE;
  1868. continue;
  1869. } else if (ase->is_active &&
  1870. check_wds_ase) {
  1871. ase->is_active = FALSE;
  1872. continue;
  1873. }
  1874. if (ase->type ==
  1875. CDP_TXRX_AST_TYPE_MEC) {
  1876. DP_STATS_INC(soc,
  1877. ast.aged_out, 1);
  1878. dp_peer_del_ast(soc, ase);
  1879. } else if (check_wds_ase) {
  1880. DP_STATS_INC(soc,
  1881. ast.aged_out, 1);
  1882. dp_peer_del_ast(soc, ase);
  1883. }
  1884. }
  1885. }
  1886. }
  1887. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1888. }
  1889. qdf_spin_unlock_bh(&soc->ast_lock);
  1890. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1891. if (qdf_atomic_read(&soc->cmn_init_done))
  1892. qdf_timer_mod(&soc->ast_aging_timer,
  1893. DP_AST_AGING_TIMER_DEFAULT_MS);
  1894. }
  1895. /*
  1896. * dp_soc_wds_attach() - Setup WDS timer and AST table
  1897. * @soc: Datapath SOC handle
  1898. *
  1899. * Return: None
  1900. */
  1901. static void dp_soc_wds_attach(struct dp_soc *soc)
  1902. {
  1903. soc->wds_ast_aging_timer_cnt = 0;
  1904. qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
  1905. dp_ast_aging_timer_fn, (void *)soc,
  1906. QDF_TIMER_TYPE_WAKE_APPS);
  1907. qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
  1908. }
  1909. /*
  1910. * dp_soc_wds_detach() - Detach WDS data structures and timers
  1911. * @txrx_soc: DP SOC handle
  1912. *
  1913. * Return: None
  1914. */
  1915. static void dp_soc_wds_detach(struct dp_soc *soc)
  1916. {
  1917. qdf_timer_stop(&soc->ast_aging_timer);
  1918. qdf_timer_free(&soc->ast_aging_timer);
  1919. }
  1920. #else
  1921. static void dp_soc_wds_attach(struct dp_soc *soc)
  1922. {
  1923. }
  1924. static void dp_soc_wds_detach(struct dp_soc *soc)
  1925. {
  1926. }
  1927. #endif
  1928. /*
1929. * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1930. * @soc: Datapath soc handle
1931. *
1932. * This API resets the default cpu ring map
  1933. */
  1934. static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
  1935. {
  1936. uint8_t i;
  1937. int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1938. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  1939. switch (nss_config) {
  1940. case dp_nss_cfg_first_radio:
  1941. /*
  1942. * Setting Tx ring map for one nss offloaded radio
  1943. */
  1944. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
  1945. break;
  1946. case dp_nss_cfg_second_radio:
  1947. /*
  1948. * Setting Tx ring for two nss offloaded radios
  1949. */
  1950. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
  1951. break;
  1952. case dp_nss_cfg_dbdc:
  1953. /*
  1954. * Setting Tx ring map for 2 nss offloaded radios
  1955. */
  1956. soc->tx_ring_map[i] =
  1957. dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
  1958. break;
  1959. case dp_nss_cfg_dbtc:
  1960. /*
  1961. * Setting Tx ring map for 3 nss offloaded radios
  1962. */
  1963. soc->tx_ring_map[i] =
  1964. dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
  1965. break;
  1966. default:
  1967. dp_err("tx_ring_map failed due to invalid nss cfg");
  1968. break;
  1969. }
  1970. }
  1971. }
  1972. /*
1973. * dp_soc_ring_if_nss_offloaded() - find if a ring is offloaded to NSS
1974. * @soc: DP soc handle
1975. * @ring_type: ring type
1976. * @ring_num: ring number
1977. *
1978. * Return: non-zero if the ring is offloaded to NSS, else 0
  1979. */
  1980. static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
  1981. {
  1982. uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1983. uint8_t status = 0;
  1984. switch (ring_type) {
  1985. case WBM2SW_RELEASE:
  1986. case REO_DST:
  1987. case RXDMA_BUF:
  1988. status = ((nss_config) & (1 << ring_num));
  1989. break;
  1990. default:
  1991. break;
  1992. }
  1993. return status;
  1994. }
  1995. /*
  1996. * dp_soc_reset_intr_mask() - reset interrupt mask
1997. * @soc: DP SoC handle
1998. *
1999. * Return: void
  2000. */
  2001. static void dp_soc_reset_intr_mask(struct dp_soc *soc)
  2002. {
  2003. uint8_t j;
  2004. int *grp_mask = NULL;
  2005. int group_number, mask, num_ring;
  2006. /* number of tx ring */
  2007. num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  2008. /*
  2009. * group mask for tx completion ring.
  2010. */
  2011. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  2012. /* loop and reset the mask for only offloaded ring */
  2013. for (j = 0; j < num_ring; j++) {
  2014. if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
  2015. continue;
  2016. }
  2017. /*
  2018. * Group number corresponding to tx offloaded ring.
  2019. */
  2020. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2021. if (group_number < 0) {
  2022. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2023. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2024. WBM2SW_RELEASE, j);
  2025. return;
  2026. }
  2027. /* reset the tx mask for offloaded ring */
  2028. mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2029. mask &= (~(1 << j));
  2030. /*
  2031. * reset the interrupt mask for offloaded ring.
  2032. */
  2033. wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2034. }
  2035. /* number of rx rings */
  2036. num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  2037. /*
  2038. * group mask for reo destination ring.
  2039. */
  2040. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  2041. /* loop and reset the mask for only offloaded ring */
  2042. for (j = 0; j < num_ring; j++) {
  2043. if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
  2044. continue;
  2045. }
  2046. /*
  2047. * Group number corresponding to rx offloaded ring.
  2048. */
  2049. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2050. if (group_number < 0) {
  2051. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2052. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2053. REO_DST, j);
  2054. return;
  2055. }
2056. /* reset the rx ring mask for offloaded ring */
  2057. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2058. mask &= (~(1 << j));
  2059. /*
  2060. * set the interrupt mask to zero for rx offloaded radio.
  2061. */
  2062. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2063. }
  2064. /*
  2065. * group mask for Rx buffer refill ring
  2066. */
  2067. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  2068. /* loop and reset the mask for only offloaded ring */
  2069. for (j = 0; j < MAX_PDEV_CNT; j++) {
  2070. if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
  2071. continue;
  2072. }
  2073. /*
  2074. * Group number corresponding to rx offloaded ring.
  2075. */
  2076. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2077. if (group_number < 0) {
  2078. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2079. FL("ring not part of any group; ring_type: %d,ring_num %d"),
2080. RXDMA_BUF, j);
  2081. return;
  2082. }
2083. /* reset the host2rxdma ring mask for offloaded ring */
  2084. mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2085. group_number);
  2086. mask &= (~(1 << j));
  2087. /*
  2088. * set the interrupt mask to zero for rx offloaded radio.
  2089. */
  2090. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2091. group_number, mask);
  2092. }
  2093. }
  2094. #ifdef IPA_OFFLOAD
  2095. /**
2096. * dp_reo_remap_config() - configure reo remap register value based
2097. * on nss configuration.
2098. * Based on the offload_radio value, the remap configuration below
2099. * gets applied:
  2100. * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
  2101. * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
  2102. * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
  2103. * 3 - both Radios handled by NSS (remap not required)
  2104. * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
  2105. *
  2106. * @remap1: output parameter indicates reo remap 1 register value
  2107. * @remap2: output parameter indicates reo remap 2 register value
  2108. * Return: bool type, true if remap is configured else false.
  2109. */
  2110. static bool dp_reo_remap_config(struct dp_soc *soc,
  2111. uint32_t *remap1,
  2112. uint32_t *remap2)
  2113. {
  2114. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
  2115. (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
  2116. *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
  2117. (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
  2118. dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
  2119. return true;
  2120. }
  2121. #else
  2122. static bool dp_reo_remap_config(struct dp_soc *soc,
  2123. uint32_t *remap1,
  2124. uint32_t *remap2)
  2125. {
  2126. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2127. switch (offload_radio) {
  2128. case dp_nss_cfg_default:
  2129. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  2130. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  2131. (0x3 << 18) | (0x4 << 21)) << 8;
  2132. *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  2133. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  2134. (0x3 << 18) | (0x4 << 21)) << 8;
  2135. break;
  2136. case dp_nss_cfg_first_radio:
  2137. *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
  2138. (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
  2139. (0x2 << 18) | (0x3 << 21)) << 8;
  2140. *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
  2141. (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
  2142. (0x4 << 18) | (0x2 << 21)) << 8;
  2143. break;
  2144. case dp_nss_cfg_second_radio:
  2145. *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
  2146. (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
  2147. (0x1 << 18) | (0x3 << 21)) << 8;
  2148. *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
  2149. (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
  2150. (0x4 << 18) | (0x1 << 21)) << 8;
  2151. break;
  2152. case dp_nss_cfg_dbdc:
  2153. case dp_nss_cfg_dbtc:
  2154. /* return false if both or all are offloaded to NSS */
  2155. return false;
  2156. }
  2157. dp_debug("remap1 %x remap2 %x offload_radio %u",
  2158. *remap1, *remap2, offload_radio);
  2159. return true;
  2160. }
  2161. #endif
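/*
 * Editor's note (illustrative): each REO remap register above packs
 * eight 3-bit destination-ring fields (hence the << 0, << 3, ... << 21
 * pattern), shifted up by 8, presumably to land in the register's
 * remap field. The default case spreads flows round-robin across SW
 * rings 1-4, the per-radio offload cases skip the ring owned by NSS,
 * and the IPA build rotates only over rings 1-3 so that ring 4 stays
 * available for IPA, matching the table in the function header.
 */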
  2162. /*
  2163. * dp_reo_frag_dst_set() - configure reo register to set the
  2164. * fragment destination ring
  2165. * @soc : Datapath soc
  2166. * @frag_dst_ring : output parameter to set fragment destination ring
  2167. *
2168. * Based on offload_radio, the fragment destination ring below is selected:
  2169. * 0 - TCL
  2170. * 1 - SW1
  2171. * 2 - SW2
  2172. * 3 - SW3
  2173. * 4 - SW4
  2174. * 5 - Release
  2175. * 6 - FW
  2176. * 7 - alternate select
  2177. *
2178. * Return: void
  2179. */
  2180. static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
  2181. {
  2182. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2183. switch (offload_radio) {
  2184. case dp_nss_cfg_default:
  2185. *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
  2186. break;
  2187. case dp_nss_cfg_dbdc:
  2188. case dp_nss_cfg_dbtc:
  2189. *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
  2190. break;
  2191. default:
  2192. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2193. FL("dp_reo_frag_dst_set invalid offload radio config"));
  2194. break;
  2195. }
  2196. }
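/*
 * Example (editor's note): for dp_nss_cfg_dbdc and dp_nss_cfg_dbtc the
 * function above picks HAL_SRNG_REO_ALTERNATE_SELECT, i.e. the
 * "alternate select" option from the table in the function header, so
 * fragment destination resolution is deferred to the alternate-select
 * configuration rather than a fixed SW ring; the default (no offload)
 * case routes fragments to HAL_SRNG_REO_EXCEPTION.
 */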
  2197. /*
2198. * dp_soc_cmn_setup() - Common SoC level initialization
  2199. * @soc: Datapath SOC handle
  2200. *
  2201. * This is an internal function used to setup common SOC data structures,
  2202. * to be called from PDEV attach after receiving HW mode capabilities from FW
  2203. */
  2204. static int dp_soc_cmn_setup(struct dp_soc *soc)
  2205. {
  2206. int i;
  2207. struct hal_reo_params reo_params;
  2208. int tx_ring_size;
  2209. int tx_comp_ring_size;
  2210. int reo_dst_ring_size;
  2211. uint32_t entries;
  2212. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2213. if (qdf_atomic_read(&soc->cmn_init_done))
  2214. return 0;
  2215. if (dp_hw_link_desc_pool_setup(soc))
  2216. goto fail1;
  2217. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2218. /* Setup SRNG rings */
  2219. /* Common rings */
  2220. if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
  2221. wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
  2222. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2223. FL("dp_srng_setup failed for wbm_desc_rel_ring"));
  2224. goto fail1;
  2225. }
  2226. soc->num_tcl_data_rings = 0;
  2227. /* Tx data rings */
  2228. if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
  2229. soc->num_tcl_data_rings =
  2230. wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
  2231. tx_comp_ring_size =
  2232. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2233. tx_ring_size =
  2234. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2235. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  2236. if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
  2237. TCL_DATA, i, 0, tx_ring_size)) {
  2238. QDF_TRACE(QDF_MODULE_ID_DP,
  2239. QDF_TRACE_LEVEL_ERROR,
  2240. FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
  2241. goto fail1;
  2242. }
  2243. /*
  2244. * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
  2245. * count
  2246. */
  2247. if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
  2248. WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
  2249. QDF_TRACE(QDF_MODULE_ID_DP,
  2250. QDF_TRACE_LEVEL_ERROR,
  2251. FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
  2252. goto fail1;
  2253. }
  2254. }
  2255. } else {
  2256. /* This will be incremented during per pdev ring setup */
  2257. soc->num_tcl_data_rings = 0;
  2258. }
  2259. if (dp_tx_soc_attach(soc)) {
  2260. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2261. FL("dp_tx_soc_attach failed"));
  2262. goto fail1;
  2263. }
  2264. entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
  2265. /* TCL command and status rings */
  2266. if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
  2267. entries)) {
  2268. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2269. FL("dp_srng_setup failed for tcl_cmd_ring"));
  2270. goto fail1;
  2271. }
  2272. entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
  2273. if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
  2274. entries)) {
  2275. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2276. FL("dp_srng_setup failed for tcl_status_ring"));
  2277. goto fail1;
  2278. }
  2279. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2280. /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
  2281. * descriptors
  2282. */
  2283. /* Rx data rings */
  2284. if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2285. soc->num_reo_dest_rings =
  2286. wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
  2287. QDF_TRACE(QDF_MODULE_ID_DP,
  2288. QDF_TRACE_LEVEL_INFO,
  2289. FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
  2290. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  2291. if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
  2292. i, 0, reo_dst_ring_size)) {
  2293. QDF_TRACE(QDF_MODULE_ID_DP,
  2294. QDF_TRACE_LEVEL_ERROR,
  2295. FL(RNG_ERR "reo_dest_ring [%d]"), i);
  2296. goto fail1;
  2297. }
  2298. }
  2299. } else {
  2300. /* This will be incremented during per pdev ring setup */
  2301. soc->num_reo_dest_rings = 0;
  2302. }
  2303. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2304. /* LMAC RxDMA to SW Rings configuration */
  2305. if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
  2306. /* Only valid for MCL */
  2307. struct dp_pdev *pdev = soc->pdev_list[0];
  2308. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
  2309. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
  2310. RXDMA_DST, 0, i,
  2311. entries)) {
  2312. QDF_TRACE(QDF_MODULE_ID_DP,
  2313. QDF_TRACE_LEVEL_ERROR,
  2314. FL(RNG_ERR "rxdma_err_dst_ring"));
  2315. goto fail1;
  2316. }
  2317. }
  2318. }
  2319. /* TBD: call dp_rx_init to setup Rx SW descriptors */
  2320. /* REO reinjection ring */
  2321. entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
  2322. if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
  2323. entries)) {
  2324. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2325. FL("dp_srng_setup failed for reo_reinject_ring"));
  2326. goto fail1;
  2327. }
  2328. /* Rx release ring */
  2329. if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
  2330. wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
  2331. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2332. FL("dp_srng_setup failed for rx_rel_ring"));
  2333. goto fail1;
  2334. }
  2335. /* Rx exception ring */
  2336. entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
  2337. if (dp_srng_setup(soc, &soc->reo_exception_ring,
  2338. REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
  2339. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2340. FL("dp_srng_setup failed for reo_exception_ring"));
  2341. goto fail1;
  2342. }
  2343. /* REO command and status rings */
  2344. if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
  2345. wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
  2346. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2347. FL("dp_srng_setup failed for reo_cmd_ring"));
  2348. goto fail1;
  2349. }
  2350. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  2351. TAILQ_INIT(&soc->rx.reo_cmd_list);
  2352. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  2353. if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
  2354. wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
  2355. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2356. FL("dp_srng_setup failed for reo_status_ring"));
  2357. goto fail1;
  2358. }
  2359. /* Reset the cpu ring map if radio is NSS offloaded */
  2360. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
  2361. dp_soc_reset_cpu_ring_map(soc);
  2362. dp_soc_reset_intr_mask(soc);
  2363. }
  2364. /* Setup HW REO */
  2365. qdf_mem_zero(&reo_params, sizeof(reo_params));
  2366. if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
  2367. /*
  2368. * Reo ring remap is not required if both radios
  2369. * are offloaded to NSS
  2370. */
  2371. if (!dp_reo_remap_config(soc,
  2372. &reo_params.remap1,
  2373. &reo_params.remap2))
  2374. goto out;
  2375. reo_params.rx_hash_enabled = true;
  2376. }
  2377. /* setup the global rx defrag waitlist */
  2378. TAILQ_INIT(&soc->rx.defrag.waitlist);
  2379. soc->rx.defrag.timeout_ms =
  2380. wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
  2381. soc->rx.flags.defrag_timeout_check =
  2382. wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
  2383. qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
  2384. out:
  2385. /*
  2386. * set the fragment destination ring
  2387. */
  2388. dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
  2389. hal_reo_setup(soc->hal_soc, &reo_params);
  2390. qdf_atomic_set(&soc->cmn_init_done, 1);
  2391. qdf_nbuf_queue_init(&soc->htt_stats.msg);
  2392. return 0;
  2393. fail1:
  2394. /*
  2395. * Cleanup will be done as part of soc_detach, which will
  2396. * be called on pdev attach failure
  2397. */
  2398. return QDF_STATUS_E_FAILURE;
  2399. }
  2400. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
  2401. static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2402. {
  2403. struct cdp_lro_hash_config lro_hash;
  2404. QDF_STATUS status;
  2405. if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  2406. !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
  2407. !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  2408. dp_err("LRO, GRO and RX hash disabled");
  2409. return QDF_STATUS_E_FAILURE;
  2410. }
  2411. qdf_mem_zero(&lro_hash, sizeof(lro_hash));
  2412. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
  2413. wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
  2414. lro_hash.lro_enable = 1;
  2415. lro_hash.tcp_flag = QDF_TCPHDR_ACK;
  2416. lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
  2417. QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
  2418. QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
  2419. }
  2420. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
  2421. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2422. LRO_IPV4_SEED_ARR_SZ));
  2423. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
  2424. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2425. LRO_IPV6_SEED_ARR_SZ));
  2426. qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
  2427. if (!soc->cdp_soc.ol_ops->lro_hash_config) {
  2428. QDF_BUG(0);
  2429. dp_err("lro_hash_config not configured");
  2430. return QDF_STATUS_E_FAILURE;
  2431. }
  2432. status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
  2433. &lro_hash);
  2434. if (!QDF_IS_STATUS_SUCCESS(status)) {
  2435. dp_err("failed to send lro_hash_config to FW %u", status);
  2436. return status;
  2437. }
  2438. dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
  2439. lro_hash.lro_enable, lro_hash.tcp_flag,
  2440. lro_hash.tcp_flag_mask);
  2441. dp_info("toeplitz_hash_ipv4:");
  2442. qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2443. (void *)lro_hash.toeplitz_hash_ipv4,
  2444. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2445. LRO_IPV4_SEED_ARR_SZ));
  2446. dp_info("toeplitz_hash_ipv6:");
  2447. qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2448. (void *)lro_hash.toeplitz_hash_ipv6,
  2449. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2450. LRO_IPV6_SEED_ARR_SZ));
  2451. return status;
  2452. }
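/*
 * Editor's note (interpretation, not verified against FW behaviour):
 * the tcp_flag/tcp_flag_mask pair programmed above follows the usual
 * LRO convention that a segment is aggregation-eligible only when
 * (tcp_flags & tcp_flag_mask) == tcp_flag, i.e. ACK set with FIN, SYN,
 * RST, URG, ECE and CWR all clear, restricting LRO to plain data/ACK
 * segments. The two Toeplitz seeds are simply random per-attach keys
 * for the IPv4 and IPv6 flow hash.
 */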
  2453. /*
  2454. * dp_rxdma_ring_setup() - configure the RX DMA rings
  2455. * @soc: data path SoC handle
  2456. * @pdev: Physical device handle
  2457. *
  2458. * Return: 0 - success, > 0 - failure
  2459. */
  2460. #ifdef QCA_HOST2FW_RXBUF_RING
  2461. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2462. struct dp_pdev *pdev)
  2463. {
  2464. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2465. int max_mac_rings;
  2466. int i;
  2467. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2468. max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
  2469. for (i = 0; i < max_mac_rings; i++) {
  2470. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2471. "%s: pdev_id %d mac_id %d",
  2472. __func__, pdev->pdev_id, i);
  2473. if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
  2474. RXDMA_BUF, 1, i,
  2475. wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
  2476. QDF_TRACE(QDF_MODULE_ID_DP,
  2477. QDF_TRACE_LEVEL_ERROR,
  2478. FL("failed rx mac ring setup"));
  2479. return QDF_STATUS_E_FAILURE;
  2480. }
  2481. }
  2482. return QDF_STATUS_SUCCESS;
  2483. }
  2484. #else
  2485. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2486. struct dp_pdev *pdev)
  2487. {
  2488. return QDF_STATUS_SUCCESS;
  2489. }
  2490. #endif
  2491. /**
  2492. * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
  2493. * @pdev - DP_PDEV handle
  2494. *
  2495. * Return: void
  2496. */
  2497. static inline void
  2498. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  2499. {
  2500. uint8_t map_id;
  2501. struct dp_soc *soc = pdev->soc;
  2502. if (!soc)
  2503. return;
  2504. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  2505. qdf_mem_copy(pdev->dscp_tid_map[map_id],
  2506. default_dscp_tid_map,
  2507. sizeof(default_dscp_tid_map));
  2508. }
  2509. for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
  2510. hal_tx_set_dscp_tid_map(soc->hal_soc,
  2511. default_dscp_tid_map,
  2512. map_id);
  2513. }
  2514. }
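/*
 * Sketch (editor's illustration; the override values are hypothetical):
 * a later per-pdev tweak of a single DSCP code point only needs to
 * patch the copied table and reprogram the corresponding HW map, e.g.
 *
 *     pdev->dscp_tid_map[map_id][46] = 6;   // e.g. DSCP 46 (EF) -> TID 6
 *     hal_tx_set_dscp_tid_map(soc->hal_soc,
 *                             pdev->dscp_tid_map[map_id], map_id);
 *
 * mirroring the default programming loop above.
 */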
  2515. #ifdef IPA_OFFLOAD
  2516. /**
  2517. * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
  2518. * @soc: data path instance
  2519. * @pdev: core txrx pdev context
  2520. *
  2521. * Return: QDF_STATUS_SUCCESS: success
  2522. * QDF_STATUS_E_RESOURCES: Error return
  2523. */
  2524. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2525. struct dp_pdev *pdev)
  2526. {
  2527. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2528. int entries;
  2529. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2530. entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
  2531. /* Setup second Rx refill buffer ring */
  2532. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2533. IPA_RX_REFILL_BUF_RING_IDX,
  2534. pdev->pdev_id,
  2535. entries)) {
  2536. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2537. FL("dp_srng_setup failed second rx refill ring"));
  2538. return QDF_STATUS_E_FAILURE;
  2539. }
  2540. return QDF_STATUS_SUCCESS;
  2541. }
  2542. /**
  2543. * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
  2544. * @soc: data path instance
  2545. * @pdev: core txrx pdev context
  2546. *
  2547. * Return: void
  2548. */
  2549. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2550. struct dp_pdev *pdev)
  2551. {
  2552. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2553. IPA_RX_REFILL_BUF_RING_IDX);
  2554. }
  2555. #else
  2556. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2557. struct dp_pdev *pdev)
  2558. {
  2559. return QDF_STATUS_SUCCESS;
  2560. }
  2561. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2562. struct dp_pdev *pdev)
  2563. {
  2564. }
  2565. #endif
  2566. #if !defined(DISABLE_MON_CONFIG)
  2567. /**
  2568. * dp_mon_rings_setup() - Initialize Monitor rings based on target
  2569. * @soc: soc handle
  2570. * @pdev: physical device handle
  2571. *
  2572. * Return: nonzero on failure and zero on success
  2573. */
  2574. static
  2575. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2576. {
  2577. int mac_id = 0;
  2578. int pdev_id = pdev->pdev_id;
  2579. int entries;
  2580. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2581. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2582. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  2583. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  2584. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  2585. entries =
  2586. wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
  2587. if (dp_srng_setup(soc,
  2588. &pdev->rxdma_mon_buf_ring[mac_id],
  2589. RXDMA_MONITOR_BUF, 0, mac_for_pdev,
  2590. entries)) {
  2591. QDF_TRACE(QDF_MODULE_ID_DP,
  2592. QDF_TRACE_LEVEL_ERROR,
  2593. FL(RNG_ERR "rxdma_mon_buf_ring "));
  2594. return QDF_STATUS_E_NOMEM;
  2595. }
  2596. entries =
  2597. wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
  2598. if (dp_srng_setup(soc,
  2599. &pdev->rxdma_mon_dst_ring[mac_id],
  2600. RXDMA_MONITOR_DST, 0, mac_for_pdev,
  2601. entries)) {
  2602. QDF_TRACE(QDF_MODULE_ID_DP,
  2603. QDF_TRACE_LEVEL_ERROR,
  2604. FL(RNG_ERR "rxdma_mon_dst_ring"));
  2605. return QDF_STATUS_E_NOMEM;
  2606. }
  2607. entries =
  2608. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2609. if (dp_srng_setup(soc,
  2610. &pdev->rxdma_mon_status_ring[mac_id],
  2611. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2612. entries)) {
  2613. QDF_TRACE(QDF_MODULE_ID_DP,
  2614. QDF_TRACE_LEVEL_ERROR,
  2615. FL(RNG_ERR "rxdma_mon_status_ring"));
  2616. return QDF_STATUS_E_NOMEM;
  2617. }
  2618. entries =
  2619. wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
  2620. if (dp_srng_setup(soc,
  2621. &pdev->rxdma_mon_desc_ring[mac_id],
  2622. RXDMA_MONITOR_DESC, 0, mac_for_pdev,
  2623. entries)) {
  2624. QDF_TRACE(QDF_MODULE_ID_DP,
  2625. QDF_TRACE_LEVEL_ERROR,
  2626. FL(RNG_ERR "rxdma_mon_desc_ring"));
  2627. return QDF_STATUS_E_NOMEM;
  2628. }
  2629. } else {
  2630. entries =
  2631. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2632. if (dp_srng_setup(soc,
  2633. &pdev->rxdma_mon_status_ring[mac_id],
  2634. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2635. entries)) {
  2636. QDF_TRACE(QDF_MODULE_ID_DP,
  2637. QDF_TRACE_LEVEL_ERROR,
  2638. FL(RNG_ERR "rxdma_mon_status_ring"));
  2639. return QDF_STATUS_E_NOMEM;
  2640. }
  2641. }
  2642. }
  2643. return QDF_STATUS_SUCCESS;
  2644. }
  2645. #else
  2646. static
  2647. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2648. {
  2649. return QDF_STATUS_SUCCESS;
  2650. }
  2651. #endif
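/*
 * Editor's note: the split above mirrors the two monitor-mode
 * architectures handled here. Targets with a dedicated RxDMA1 engine
 * (rxdma1_enable) get the full set of monitor buffer, destination,
 * status and descriptor rings per mac, while other targets only need
 * the monitor status ring and typically reuse the regular Rx path for
 * packet data.
 */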
2652. /* dp_iterate_update_peer_list() - update peer stats on cal client timer
  2653. * @pdev_hdl: pdev handle
  2654. */
  2655. #ifdef ATH_SUPPORT_EXT_STAT
  2656. void dp_iterate_update_peer_list(void *pdev_hdl)
  2657. {
  2658. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  2659. struct dp_vdev *vdev = NULL;
  2660. struct dp_peer *peer = NULL;
  2661. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  2662. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  2663. dp_cal_client_update_peer_stats(&peer->stats);
  2664. }
  2665. }
  2666. }
  2667. #else
  2668. void dp_iterate_update_peer_list(void *pdev_hdl)
  2669. {
  2670. }
  2671. #endif
  2672. /*
  2673. * dp_pdev_attach_wifi3() - attach txrx pdev
2674. * @txrx_soc: Datapath SOC handle
2675. * @ctrl_pdev: Opaque PDEV object
  2676. * @htc_handle: HTC handle for host-target interface
  2677. * @qdf_osdev: QDF OS device
  2678. * @pdev_id: PDEV ID
  2679. *
  2680. * Return: DP PDEV handle on success, NULL on failure
  2681. */
  2682. static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
  2683. struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
  2684. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
  2685. {
  2686. int tx_ring_size;
  2687. int tx_comp_ring_size;
  2688. int reo_dst_ring_size;
  2689. int entries;
  2690. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2691. int nss_cfg;
  2692. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  2693. struct dp_pdev *pdev = NULL;
  2694. if (soc->dp_soc_reinit)
  2695. pdev = soc->pdev_list[pdev_id];
  2696. else
  2697. pdev = qdf_mem_malloc(sizeof(*pdev));
  2698. if (!pdev) {
  2699. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2700. FL("DP PDEV memory allocation failed"));
  2701. goto fail0;
  2702. }
  2703. /*
  2704. * Variable to prevent double pdev deinitialization during
2705. * radio detach execution, i.e. in the absence of any vdev.
  2706. */
  2707. pdev->pdev_deinit = 0;
  2708. pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
  2709. if (!pdev->invalid_peer) {
  2710. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2711. FL("Invalid peer memory allocation failed"));
  2712. qdf_mem_free(pdev);
  2713. goto fail0;
  2714. }
  2715. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2716. pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
  2717. if (!pdev->wlan_cfg_ctx) {
  2718. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2719. FL("pdev cfg_attach failed"));
  2720. qdf_mem_free(pdev->invalid_peer);
  2721. qdf_mem_free(pdev);
  2722. goto fail0;
  2723. }
  2724. /*
  2725. * set nss pdev config based on soc config
  2726. */
  2727. nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
  2728. wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
  2729. (nss_cfg & (1 << pdev_id)));
  2730. pdev->soc = soc;
  2731. pdev->ctrl_pdev = ctrl_pdev;
  2732. pdev->pdev_id = pdev_id;
  2733. soc->pdev_list[pdev_id] = pdev;
  2734. pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
  2735. soc->pdev_count++;
  2736. TAILQ_INIT(&pdev->vdev_list);
  2737. qdf_spinlock_create(&pdev->vdev_list_lock);
  2738. pdev->vdev_count = 0;
  2739. qdf_spinlock_create(&pdev->tx_mutex);
  2740. qdf_spinlock_create(&pdev->neighbour_peer_mutex);
  2741. TAILQ_INIT(&pdev->neighbour_peers_list);
  2742. pdev->neighbour_peers_added = false;
  2743. if (dp_soc_cmn_setup(soc)) {
  2744. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2745. FL("dp_soc_cmn_setup failed"));
  2746. goto fail1;
  2747. }
  2748. /* Setup per PDEV TCL rings if configured */
  2749. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  2750. tx_ring_size =
  2751. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2752. tx_comp_ring_size =
  2753. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2754. if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
  2755. pdev_id, pdev_id, tx_ring_size)) {
  2756. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2757. FL("dp_srng_setup failed for tcl_data_ring"));
  2758. goto fail1;
  2759. }
  2760. if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
  2761. WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
  2762. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2763. FL("dp_srng_setup failed for tx_comp_ring"));
  2764. goto fail1;
  2765. }
  2766. soc->num_tcl_data_rings++;
  2767. }
  2768. /* Tx specific init */
  2769. if (dp_tx_pdev_attach(pdev)) {
  2770. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2771. FL("dp_tx_pdev_attach failed"));
  2772. goto fail1;
  2773. }
  2774. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2775. /* Setup per PDEV REO rings if configured */
  2776. if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2777. if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
  2778. pdev_id, pdev_id, reo_dst_ring_size)) {
  2779. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2780. FL("dp_srng_setup failed for reo_dest_ring"));
  2781. goto fail1;
  2782. }
  2783. soc->num_reo_dest_rings++;
  2784. }
  2785. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
  2786. wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
  2787. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2788. FL("dp_srng_setup failed rx refill ring"));
  2789. goto fail1;
  2790. }
  2791. if (dp_rxdma_ring_setup(soc, pdev)) {
  2792. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2793. FL("RXDMA ring config failed"));
  2794. goto fail1;
  2795. }
  2796. if (dp_mon_rings_setup(soc, pdev)) {
  2797. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2798. FL("MONITOR rings setup failed"));
  2799. goto fail1;
  2800. }
  2801. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2802. if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
  2803. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
  2804. 0, pdev_id,
  2805. entries)) {
  2806. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2807. FL(RNG_ERR "rxdma_err_dst_ring"));
  2808. goto fail1;
  2809. }
  2810. }
  2811. if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
  2812. goto fail1;
  2813. if (dp_ipa_ring_resource_setup(soc, pdev))
  2814. goto fail1;
  2815. if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
  2816. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2817. FL("dp_ipa_uc_attach failed"));
  2818. goto fail1;
  2819. }
  2820. /* Rx specific init */
  2821. if (dp_rx_pdev_attach(pdev)) {
  2822. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2823. FL("dp_rx_pdev_attach failed"));
  2824. goto fail1;
  2825. }
  2826. DP_STATS_INIT(pdev);
  2827. /* Monitor filter init */
  2828. pdev->mon_filter_mode = MON_FILTER_ALL;
  2829. pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
  2830. pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
  2831. pdev->fp_data_filter = FILTER_DATA_ALL;
  2832. pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
  2833. pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
  2834. pdev->mo_data_filter = FILTER_DATA_ALL;
  2835. dp_local_peer_id_pool_init(pdev);
  2836. dp_dscp_tid_map_setup(pdev);
  2837. /* Rx monitor mode specific init */
  2838. if (dp_rx_pdev_mon_attach(pdev)) {
  2839. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2840. "dp_rx_pdev_mon_attach failed");
  2841. goto fail1;
  2842. }
  2843. if (dp_wdi_event_attach(pdev)) {
  2844. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2845. "dp_wdi_evet_attach failed");
  2846. goto fail1;
  2847. }
  2848. /* set the reo destination during initialization */
  2849. pdev->reo_dest = pdev->pdev_id + 1;
  2850. /*
  2851. * initialize ppdu tlv list
  2852. */
  2853. TAILQ_INIT(&pdev->ppdu_info_list);
  2854. pdev->tlv_count = 0;
  2855. pdev->list_depth = 0;
  2856. qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
  2857. pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
  2858. sizeof(struct cdp_tx_sojourn_stats), 0, 4,
  2859. TRUE);
2860. /* initialize cal client timer */
  2861. dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
  2862. &dp_iterate_update_peer_list);
  2863. return (struct cdp_pdev *)pdev;
  2864. fail1:
  2865. dp_pdev_detach((struct cdp_pdev *)pdev, 0);
  2866. fail0:
  2867. return NULL;
  2868. }
  2869. /*
2870. * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
  2871. * @soc: data path SoC handle
  2872. * @pdev: Physical device handle
  2873. *
  2874. * Return: void
  2875. */
  2876. #ifdef QCA_HOST2FW_RXBUF_RING
  2877. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2878. struct dp_pdev *pdev)
  2879. {
  2880. int max_mac_rings =
  2881. wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
  2882. int i;
  2883. max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
  2884. max_mac_rings : MAX_RX_MAC_RINGS;
2885. for (i = 0; i < max_mac_rings; i++)
  2886. dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
  2887. RXDMA_BUF, 1);
  2888. qdf_timer_free(&soc->mon_reap_timer);
  2889. }
  2890. #else
  2891. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2892. struct dp_pdev *pdev)
  2893. {
  2894. }
  2895. #endif
  2896. /*
  2897. * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
  2898. * @pdev: device object
  2899. *
  2900. * Return: void
  2901. */
  2902. static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  2903. {
  2904. struct dp_neighbour_peer *peer = NULL;
  2905. struct dp_neighbour_peer *temp_peer = NULL;
  2906. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  2907. neighbour_peer_list_elem, temp_peer) {
  2908. /* delete this peer from the list */
  2909. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  2910. peer, neighbour_peer_list_elem);
  2911. qdf_mem_free(peer);
  2912. }
  2913. qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
  2914. }
  2915. /**
  2916. * dp_htt_ppdu_stats_detach() - detach stats resources
  2917. * @pdev: Datapath PDEV handle
  2918. *
  2919. * Return: void
  2920. */
  2921. static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
  2922. {
  2923. struct ppdu_info *ppdu_info, *ppdu_info_next;
  2924. TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
  2925. ppdu_info_list_elem, ppdu_info_next) {
  2926. if (!ppdu_info)
  2927. break;
  2928. qdf_assert_always(ppdu_info->nbuf);
  2929. qdf_nbuf_free(ppdu_info->nbuf);
  2930. qdf_mem_free(ppdu_info);
  2931. }
  2932. }
  2933. #if !defined(DISABLE_MON_CONFIG)
  2934. static
  2935. void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  2936. int mac_id)
  2937. {
  2938. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  2939. dp_srng_cleanup(soc,
  2940. &pdev->rxdma_mon_buf_ring[mac_id],
  2941. RXDMA_MONITOR_BUF, 0);
  2942. dp_srng_cleanup(soc,
  2943. &pdev->rxdma_mon_dst_ring[mac_id],
  2944. RXDMA_MONITOR_DST, 0);
  2945. dp_srng_cleanup(soc,
  2946. &pdev->rxdma_mon_status_ring[mac_id],
  2947. RXDMA_MONITOR_STATUS, 0);
  2948. dp_srng_cleanup(soc,
  2949. &pdev->rxdma_mon_desc_ring[mac_id],
  2950. RXDMA_MONITOR_DESC, 0);
  2951. dp_srng_cleanup(soc,
  2952. &pdev->rxdma_err_dst_ring[mac_id],
  2953. RXDMA_DST, 0);
  2954. } else {
  2955. dp_srng_cleanup(soc,
  2956. &pdev->rxdma_mon_status_ring[mac_id],
  2957. RXDMA_MONITOR_STATUS, 0);
  2958. dp_srng_cleanup(soc,
  2959. &pdev->rxdma_err_dst_ring[mac_id],
  2960. RXDMA_DST, 0);
  2961. }
  2962. }
  2963. #else
  2964. static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  2965. int mac_id)
  2966. {
  2967. }
  2968. #endif
  2969. /**
  2970. * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
  2971. *
  2972. * @soc: soc handle
  2973. * @pdev: datapath physical dev handle
  2974. * @mac_id: mac number
  2975. *
  2976. * Return: None
  2977. */
  2978. static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
  2979. int mac_id)
  2980. {
  2981. }
  2982. /**
  2983. * dp_pdev_mem_reset() - Reset txrx pdev memory
  2984. * @pdev: dp pdev handle
  2985. *
  2986. * Return: None
  2987. */
  2988. static void dp_pdev_mem_reset(struct dp_pdev *pdev)
  2989. {
  2990. uint16_t len = 0;
  2991. uint8_t *dp_pdev_offset = (uint8_t *)pdev;
  2992. len = sizeof(struct dp_pdev) -
  2993. offsetof(struct dp_pdev, pdev_deinit) -
  2994. sizeof(pdev->pdev_deinit);
  2995. dp_pdev_offset = dp_pdev_offset +
  2996. offsetof(struct dp_pdev, pdev_deinit) +
  2997. sizeof(pdev->pdev_deinit);
  2998. qdf_mem_zero(dp_pdev_offset, len);
  2999. }
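/*
 * Editor's note (illustrative; the offsets are hypothetical): the
 * arithmetic above zeroes only the part of struct dp_pdev that follows
 * the pdev_deinit member. If, say, offsetof(struct dp_pdev, pdev_deinit)
 * were 24 and the member 4 bytes wide, the cleared region would start
 * at byte 28 and span sizeof(struct dp_pdev) - 28 bytes, so state
 * stored ahead of pdev_deinit survives a pdev re-init.
 * dp_soc_mem_reset() further below applies the same pattern around
 * dp_soc_reinit.
 */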
  3000. /**
  3001. * dp_pdev_deinit() - Deinit txrx pdev
  3002. * @txrx_pdev: Datapath PDEV handle
  3003. * @force: Force deinit
  3004. *
  3005. * Return: None
  3006. */
  3007. static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
  3008. {
  3009. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3010. struct dp_soc *soc = pdev->soc;
  3011. qdf_nbuf_t curr_nbuf, next_nbuf;
  3012. int mac_id;
  3013. /*
  3014. * Prevent double pdev deinitialization during radio detach
3015. * execution, i.e. in the absence of any vdev
  3016. */
  3017. if (pdev->pdev_deinit)
  3018. return;
  3019. pdev->pdev_deinit = 1;
  3020. dp_wdi_event_detach(pdev);
  3021. dp_tx_pdev_detach(pdev);
  3022. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3023. dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3024. TCL_DATA, pdev->pdev_id);
  3025. dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3026. WBM2SW_RELEASE, pdev->pdev_id);
  3027. }
  3028. dp_pktlogmod_exit(pdev);
  3029. dp_rx_pdev_detach(pdev);
  3030. dp_rx_pdev_mon_detach(pdev);
  3031. dp_neighbour_peers_detach(pdev);
  3032. qdf_spinlock_destroy(&pdev->tx_mutex);
  3033. qdf_spinlock_destroy(&pdev->vdev_list_lock);
  3034. dp_ipa_uc_detach(soc, pdev);
  3035. dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
  3036. /* Cleanup per PDEV REO rings if configured */
  3037. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3038. dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3039. REO_DST, pdev->pdev_id);
  3040. }
  3041. dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3042. dp_rxdma_ring_cleanup(soc, pdev);
  3043. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3044. dp_mon_ring_deinit(soc, pdev, mac_id);
  3045. dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3046. RXDMA_DST, 0);
  3047. }
  3048. curr_nbuf = pdev->invalid_peer_head_msdu;
  3049. while (curr_nbuf) {
  3050. next_nbuf = qdf_nbuf_next(curr_nbuf);
  3051. qdf_nbuf_free(curr_nbuf);
  3052. curr_nbuf = next_nbuf;
  3053. }
  3054. pdev->invalid_peer_head_msdu = NULL;
  3055. pdev->invalid_peer_tail_msdu = NULL;
  3056. dp_htt_ppdu_stats_detach(pdev);
  3057. qdf_nbuf_free(pdev->sojourn_buf);
  3058. dp_cal_client_detach(&pdev->cal_client_ctx);
  3059. soc->pdev_count--;
  3060. wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
  3061. qdf_mem_free(pdev->invalid_peer);
  3062. qdf_mem_free(pdev->dp_txrx_handle);
  3063. dp_pdev_mem_reset(pdev);
  3064. }
  3065. /**
  3066. * dp_pdev_deinit_wifi3() - Deinit txrx pdev
  3067. * @txrx_pdev: Datapath PDEV handle
  3068. * @force: Force deinit
  3069. *
  3070. * Return: None
  3071. */
  3072. static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
  3073. {
  3074. dp_pdev_deinit(txrx_pdev, force);
  3075. }
  3076. /*
  3077. * dp_pdev_detach() - Complete rest of pdev detach
  3078. * @txrx_pdev: Datapath PDEV handle
  3079. * @force: Force deinit
  3080. *
  3081. * Return: None
  3082. */
  3083. static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
  3084. {
  3085. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3086. struct dp_soc *soc = pdev->soc;
  3087. int mac_id;
  3088. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3089. dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3090. TCL_DATA, pdev->pdev_id);
  3091. dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3092. WBM2SW_RELEASE, pdev->pdev_id);
  3093. }
  3094. dp_mon_link_free(pdev);
  3095. /* Cleanup per PDEV REO rings if configured */
  3096. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3097. dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3098. REO_DST, pdev->pdev_id);
  3099. }
  3100. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3101. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3102. dp_mon_ring_cleanup(soc, pdev, mac_id);
  3103. dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3104. RXDMA_DST, 0);
  3105. }
  3106. soc->pdev_list[pdev->pdev_id] = NULL;
  3107. qdf_mem_free(pdev);
  3108. }
  3109. /*
  3110. * dp_pdev_detach_wifi3() - detach txrx pdev
  3111. * @txrx_pdev: Datapath PDEV handle
  3112. * @force: Force detach
  3113. *
  3114. * Return: None
  3115. */
  3116. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
  3117. {
  3118. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3119. struct dp_soc *soc = pdev->soc;
  3120. if (soc->dp_soc_reinit) {
  3121. dp_pdev_detach(txrx_pdev, force);
  3122. } else {
  3123. dp_pdev_deinit(txrx_pdev, force);
  3124. dp_pdev_detach(txrx_pdev, force);
  3125. }
  3126. }
  3127. /*
  3128. * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  3129. * @soc: DP SOC handle
  3130. */
  3131. static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  3132. {
  3133. struct reo_desc_list_node *desc;
  3134. struct dp_rx_tid *rx_tid;
  3135. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  3136. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  3137. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  3138. rx_tid = &desc->rx_tid;
  3139. qdf_mem_unmap_nbytes_single(soc->osdev,
  3140. rx_tid->hw_qdesc_paddr,
  3141. QDF_DMA_BIDIRECTIONAL,
  3142. rx_tid->hw_qdesc_alloc_size);
  3143. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  3144. qdf_mem_free(desc);
  3145. }
  3146. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  3147. qdf_list_destroy(&soc->reo_desc_freelist);
  3148. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  3149. }
  3150. /**
  3151. * dp_soc_mem_reset() - Reset Dp Soc memory
  3152. * @soc: DP handle
  3153. *
  3154. * Return: None
  3155. */
  3156. static void dp_soc_mem_reset(struct dp_soc *soc)
  3157. {
  3158. uint16_t len = 0;
  3159. uint8_t *dp_soc_offset = (uint8_t *)soc;
  3160. len = sizeof(struct dp_soc) -
  3161. offsetof(struct dp_soc, dp_soc_reinit) -
  3162. sizeof(soc->dp_soc_reinit);
  3163. dp_soc_offset = dp_soc_offset +
  3164. offsetof(struct dp_soc, dp_soc_reinit) +
  3165. sizeof(soc->dp_soc_reinit);
  3166. qdf_mem_zero(dp_soc_offset, len);
  3167. }
  3168. /**
  3169. * dp_soc_deinit() - Deinitialize txrx SOC
  3170. * @txrx_soc: Opaque DP SOC handle
  3171. *
  3172. * Return: None
  3173. */
  3174. static void dp_soc_deinit(void *txrx_soc)
  3175. {
  3176. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3177. int i;
  3178. qdf_atomic_set(&soc->cmn_init_done, 0);
  3179. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3180. if (soc->pdev_list[i])
  3181. dp_pdev_deinit((struct cdp_pdev *)
  3182. soc->pdev_list[i], 1);
  3183. }
  3184. qdf_flush_work(&soc->htt_stats.work);
  3185. qdf_disable_work(&soc->htt_stats.work);
  3186. /* Free pending htt stats messages */
  3187. qdf_nbuf_queue_free(&soc->htt_stats.msg);
  3188. dp_reo_cmdlist_destroy(soc);
  3189. dp_peer_find_detach(soc);
  3190. /* Free the ring memories */
  3191. /* Common rings */
  3192. dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3193. /* Tx data rings */
  3194. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3195. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3196. dp_srng_deinit(soc, &soc->tcl_data_ring[i],
  3197. TCL_DATA, i);
  3198. dp_srng_deinit(soc, &soc->tx_comp_ring[i],
  3199. WBM2SW_RELEASE, i);
  3200. }
  3201. }
  3202. /* TCL command and status rings */
  3203. dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3204. dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3205. /* Rx data rings */
  3206. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3207. soc->num_reo_dest_rings =
  3208. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3209. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3210. /* TODO: Get number of rings and ring sizes
  3211. * from wlan_cfg
  3212. */
  3213. dp_srng_deinit(soc, &soc->reo_dest_ring[i],
  3214. REO_DST, i);
  3215. }
  3216. }
  3217. /* REO reinjection ring */
  3218. dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3219. /* Rx release ring */
  3220. dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3221. /* Rx exception ring */
  3222. /* TODO: Better to store ring_type and ring_num in
  3223. * dp_srng during setup
  3224. */
  3225. dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3226. /* REO command and status rings */
  3227. dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3228. dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3229. dp_soc_wds_detach(soc);
  3230. qdf_spinlock_destroy(&soc->peer_ref_mutex);
  3231. qdf_spinlock_destroy(&soc->htt_stats.lock);
  3232. htt_soc_htc_dealloc(soc->htt_handle);
  3233. qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
  3234. dp_reo_cmdlist_destroy(soc);
  3235. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  3236. dp_reo_desc_freelist_destroy(soc);
  3237. qdf_spinlock_destroy(&soc->ast_lock);
  3238. dp_soc_mem_reset(soc);
  3239. }
  3240. /**
  3241. * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
  3242. * @txrx_soc: Opaque DP SOC handle
  3243. *
  3244. * Return: None
  3245. */
  3246. static void dp_soc_deinit_wifi3(void *txrx_soc)
  3247. {
  3248. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3249. soc->dp_soc_reinit = 1;
  3250. dp_soc_deinit(txrx_soc);
  3251. }
  3252. /*
  3253. * dp_soc_detach() - Detach rest of txrx SOC
  3254. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3255. *
  3256. * Return: None
  3257. */
  3258. static void dp_soc_detach(void *txrx_soc)
  3259. {
  3260. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3261. int i;
  3262. qdf_atomic_set(&soc->cmn_init_done, 0);
  3263. /* TBD: Call Tx and Rx cleanup functions to free buffers and
  3264. * SW descriptors
  3265. */
  3266. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3267. if (soc->pdev_list[i])
  3268. dp_pdev_detach((struct cdp_pdev *)
  3269. soc->pdev_list[i], 1);
  3270. }
  3271. /* Free the ring memories */
  3272. /* Common rings */
  3273. dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3274. dp_tx_soc_detach(soc);
  3275. /* Tx data rings */
  3276. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3277. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3278. dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
  3279. TCL_DATA, i);
  3280. dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
  3281. WBM2SW_RELEASE, i);
  3282. }
  3283. }
  3284. /* TCL command and status rings */
  3285. dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3286. dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3287. /* Rx data rings */
  3288. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3289. soc->num_reo_dest_rings =
  3290. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3291. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3292. /* TODO: Get number of rings and ring sizes
  3293. * from wlan_cfg
  3294. */
  3295. dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
  3296. REO_DST, i);
  3297. }
  3298. }
  3299. /* REO reinjection ring */
  3300. dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3301. /* Rx release ring */
  3302. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3303. /* Rx exception ring */
  3304. /* TODO: Better to store ring_type and ring_num in
  3305. * dp_srng during setup
  3306. */
  3307. dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3308. /* REO command and status rings */
  3309. dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3310. dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3311. dp_hw_link_desc_pool_cleanup(soc);
  3312. htt_soc_detach(soc->htt_handle);
  3313. soc->dp_soc_reinit = 0;
  3314. wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
  3315. qdf_mem_free(soc);
  3316. }
  3317. /*
  3318. * dp_soc_detach_wifi3() - Detach txrx SOC
  3319. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3320. *
  3321. * Return: None
  3322. */
  3323. static void dp_soc_detach_wifi3(void *txrx_soc)
  3324. {
  3325. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3326. if (soc->dp_soc_reinit) {
  3327. dp_soc_detach(txrx_soc);
  3328. } else {
  3329. dp_soc_deinit(txrx_soc);
  3330. dp_soc_detach(txrx_soc);
  3331. }
  3332. }
  3333. #if !defined(DISABLE_MON_CONFIG)
  3334. /**
  3335. * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
  3336. * @soc: soc handle
  3337. * @pdev: physical device handle
3338. * @mac_id: mac ring index within the pdev
3339. * @mac_for_pdev: mac id resolved for this pdev
  3340. *
  3341. * Return: non-zero for failure, zero for success
  3342. */
  3343. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3344. struct dp_pdev *pdev,
  3345. int mac_id,
  3346. int mac_for_pdev)
  3347. {
  3348. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3349. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  3350. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3351. pdev->rxdma_mon_buf_ring[mac_id]
  3352. .hal_srng,
  3353. RXDMA_MONITOR_BUF);
  3354. if (status != QDF_STATUS_SUCCESS) {
  3355. dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
  3356. return status;
  3357. }
  3358. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3359. pdev->rxdma_mon_dst_ring[mac_id]
  3360. .hal_srng,
  3361. RXDMA_MONITOR_DST);
  3362. if (status != QDF_STATUS_SUCCESS) {
  3363. dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
  3364. return status;
  3365. }
  3366. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3367. pdev->rxdma_mon_status_ring[mac_id]
  3368. .hal_srng,
  3369. RXDMA_MONITOR_STATUS);
  3370. if (status != QDF_STATUS_SUCCESS) {
  3371. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3372. return status;
  3373. }
  3374. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3375. pdev->rxdma_mon_desc_ring[mac_id]
  3376. .hal_srng,
  3377. RXDMA_MONITOR_DESC);
  3378. if (status != QDF_STATUS_SUCCESS) {
  3379. dp_err("Failed to send htt srng message for Rxdma mon desc ring");
  3380. return status;
  3381. }
  3382. } else {
  3383. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3384. pdev->rxdma_mon_status_ring[mac_id]
  3385. .hal_srng,
  3386. RXDMA_MONITOR_STATUS);
  3387. if (status != QDF_STATUS_SUCCESS) {
  3388. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3389. return status;
  3390. }
  3391. }
  3392. return status;
  3393. }
  3394. #else
  3395. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3396. struct dp_pdev *pdev,
  3397. int mac_id,
  3398. int mac_for_pdev)
  3399. {
  3400. return QDF_STATUS_SUCCESS;
  3401. }
  3402. #endif
  3403. /*
  3404. * dp_rxdma_ring_config() - configure the RX DMA rings
  3405. *
  3406. * This function is used to configure the MAC rings.
3407. * On MCL, the host provides buffers in the Host2FW ring;
3408. * FW refills (copies) buffers to the ring and updates
3409. * ring_idx in the register.
  3410. *
  3411. * @soc: data path SoC handle
  3412. *
  3413. * Return: zero on success, non-zero on failure
  3414. */
  3415. #ifdef QCA_HOST2FW_RXBUF_RING
  3416. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3417. {
  3418. int i;
  3419. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3420. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3421. struct dp_pdev *pdev = soc->pdev_list[i];
  3422. if (pdev) {
  3423. int mac_id;
  3424. bool dbs_enable = 0;
  3425. int max_mac_rings =
  3426. wlan_cfg_get_num_mac_rings
  3427. (pdev->wlan_cfg_ctx);
  3428. htt_srng_setup(soc->htt_handle, 0,
  3429. pdev->rx_refill_buf_ring.hal_srng,
  3430. RXDMA_BUF);
  3431. if (pdev->rx_refill_buf_ring2.hal_srng)
  3432. htt_srng_setup(soc->htt_handle, 0,
  3433. pdev->rx_refill_buf_ring2.hal_srng,
  3434. RXDMA_BUF);
  3435. if (soc->cdp_soc.ol_ops->
  3436. is_hw_dbs_2x2_capable) {
  3437. dbs_enable = soc->cdp_soc.ol_ops->
  3438. is_hw_dbs_2x2_capable(soc->ctrl_psoc);
  3439. }
  3440. if (dbs_enable) {
  3441. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3442. QDF_TRACE_LEVEL_ERROR,
  3443. FL("DBS enabled max_mac_rings %d"),
  3444. max_mac_rings);
  3445. } else {
  3446. max_mac_rings = 1;
  3447. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3448. QDF_TRACE_LEVEL_ERROR,
  3449. FL("DBS disabled, max_mac_rings %d"),
  3450. max_mac_rings);
  3451. }
  3452. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3453. FL("pdev_id %d max_mac_rings %d"),
  3454. pdev->pdev_id, max_mac_rings);
  3455. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  3456. int mac_for_pdev = dp_get_mac_id_for_pdev(
  3457. mac_id, pdev->pdev_id);
  3458. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3459. QDF_TRACE_LEVEL_ERROR,
  3460. FL("mac_id %d"), mac_for_pdev);
  3461. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3462. pdev->rx_mac_buf_ring[mac_id]
  3463. .hal_srng,
  3464. RXDMA_BUF);
  3465. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3466. pdev->rxdma_err_dst_ring[mac_id]
  3467. .hal_srng,
  3468. RXDMA_DST);
  3469. /* Configure monitor mode rings */
  3470. status = dp_mon_htt_srng_setup(soc, pdev,
  3471. mac_id,
  3472. mac_for_pdev);
  3473. if (status != QDF_STATUS_SUCCESS) {
  3474. dp_err("Failed to send htt monitor messages to target");
  3475. return status;
  3476. }
  3477. }
  3478. }
  3479. }
  3480. /*
  3481. * Timer to reap rxdma status rings.
  3482. * Needed until we enable ppdu end interrupts
  3483. */
  3484. qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
  3485. dp_service_mon_rings, (void *)soc,
  3486. QDF_TIMER_TYPE_WAKE_APPS);
  3487. soc->reap_timer_init = 1;
  3488. return status;
  3489. }
  3490. #else
  3491. /* This is only for WIN */
  3492. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3493. {
  3494. int i;
  3495. int mac_id;
  3496. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3497. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3498. struct dp_pdev *pdev = soc->pdev_list[i];
  3499. if (pdev == NULL)
  3500. continue;
  3501. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3502. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
  3503. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3504. pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
  3505. #ifndef DISABLE_MON_CONFIG
  3506. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3507. pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
  3508. RXDMA_MONITOR_BUF);
  3509. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3510. pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
  3511. RXDMA_MONITOR_DST);
  3512. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3513. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  3514. RXDMA_MONITOR_STATUS);
  3515. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3516. pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
  3517. RXDMA_MONITOR_DESC);
  3518. #endif
  3519. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3520. pdev->rxdma_err_dst_ring[mac_id].hal_srng,
  3521. RXDMA_DST);
  3522. }
  3523. }
  3524. return status;
  3525. }
  3526. #endif
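/*
 * Editor's note: the QCA_HOST2FW_RXBUF_RING variant above (MCL) also
 * initializes the mon_reap_timer used to poll the rxdma status rings
 * until ppdu-end interrupts are enabled, whereas the WIN variant simply
 * plumbs every per-mac ring through htt_srng_setup().
 */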
  3527. /*
  3528. * dp_soc_attach_target_wifi3() - SOC initialization in the target
  3529. * @cdp_soc: Opaque Datapath SOC handle
  3530. *
  3531. * Return: zero on success, non-zero on failure
  3532. */
  3533. static QDF_STATUS
  3534. dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
  3535. {
  3536. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  3537. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3538. htt_soc_attach_target(soc->htt_handle);
  3539. status = dp_rxdma_ring_config(soc);
  3540. if (status != QDF_STATUS_SUCCESS) {
  3541. dp_err("Failed to send htt srng setup messages to target");
  3542. return status;
  3543. }
  3544. DP_STATS_INIT(soc);
  3545. /* initialize work queue for stats processing */
  3546. qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
  3547. return QDF_STATUS_SUCCESS;
  3548. }
  3549. /*
  3550. * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3551. * @cdp_soc: Datapath SOC handle
  3552. */
  3553. static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
  3554. {
  3555. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  3556. return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
  3557. }
  3558. /*
  3559. * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3560. * @cdp_soc: Datapath SOC handle
3561. * @config: nss config
  3562. */
  3563. static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
  3564. {
  3565. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  3566. struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
  3567. wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
  3568. /*
  3569. * TODO: masked out based on the per offloaded radio
  3570. */
  3571. switch (config) {
  3572. case dp_nss_cfg_default:
  3573. break;
  3574. case dp_nss_cfg_dbdc:
  3575. case dp_nss_cfg_dbtc:
  3576. wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
  3577. wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
  3578. wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
  3579. wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
  3580. break;
  3581. default:
  3582. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3583. "Invalid offload config %d", config);
  3584. }
  3585. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3586. FL("nss-wifi<0> nss config is enabled"));
  3587. }
  3588. /*
  3589. * dp_vdev_attach_wifi3() - attach txrx vdev
  3590. * @txrx_pdev: Datapath PDEV handle
  3591. * @vdev_mac_addr: MAC address of the virtual interface
  3592. * @vdev_id: VDEV Id
  3593. * @wlan_op_mode: VDEV operating mode
  3594. *
  3595. * Return: DP VDEV handle on success, NULL on failure
  3596. */
  3597. static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
  3598. uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
  3599. {
  3600. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3601. struct dp_soc *soc = pdev->soc;
  3602. struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
  3603. if (!vdev) {
  3604. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3605. FL("DP VDEV memory allocation failed"));
  3606. goto fail0;
  3607. }
  3608. vdev->pdev = pdev;
  3609. vdev->vdev_id = vdev_id;
  3610. vdev->opmode = op_mode;
  3611. vdev->osdev = soc->osdev;
  3612. vdev->osif_rx = NULL;
  3613. vdev->osif_rsim_rx_decap = NULL;
  3614. vdev->osif_get_key = NULL;
  3615. vdev->osif_rx_mon = NULL;
  3616. vdev->osif_tx_free_ext = NULL;
  3617. vdev->osif_vdev = NULL;
  3618. vdev->delete.pending = 0;
  3619. vdev->safemode = 0;
  3620. vdev->drop_unenc = 1;
  3621. vdev->sec_type = cdp_sec_type_none;
  3622. #ifdef notyet
  3623. vdev->filters_num = 0;
  3624. #endif
  3625. qdf_mem_copy(
  3626. &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
  3627. /* TODO: Initialize default HTT meta data that will be used in
  3628. * TCL descriptors for packets transmitted from this VDEV
  3629. */
  3630. TAILQ_INIT(&vdev->peer_list);
  3631. if ((soc->intr_mode == DP_INTR_POLL) &&
  3632. wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
  3633. if ((pdev->vdev_count == 0) ||
  3634. (wlan_op_mode_monitor == vdev->opmode))
  3635. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  3636. }
  3637. if (wlan_op_mode_monitor == vdev->opmode) {
  3638. pdev->monitor_vdev = vdev;
  3639. return (struct cdp_vdev *)vdev;
  3640. }
  3641. vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  3642. vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  3643. vdev->dscp_tid_map_id = 0;
  3644. vdev->mcast_enhancement_en = 0;
  3645. vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
  3646. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3647. /* add this vdev into the pdev's list */
  3648. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  3649. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3650. pdev->vdev_count++;
  3651. dp_tx_vdev_attach(vdev);
  3652. if (pdev->vdev_count == 1)
  3653. dp_lro_hash_setup(soc, pdev);
  3654. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3655. "Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
  3656. DP_STATS_INIT(vdev);
  3657. if (wlan_op_mode_sta == vdev->opmode)
  3658. dp_peer_create_wifi3((struct cdp_vdev *)vdev,
  3659. vdev->mac_addr.raw,
  3660. NULL);
  3661. return (struct cdp_vdev *)vdev;
  3662. fail0:
  3663. return NULL;
  3664. }
  3665. /**
  3666. * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
  3667. * @vdev: Datapath VDEV handle
  3668. * @osif_vdev: OSIF vdev handle
  3669. * @ctrl_vdev: UMAC vdev handle
  3670. * @txrx_ops: Tx and Rx operations
  3671. *
3672. * Return: void
  3673. */
  3674. static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
  3675. void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
  3676. struct ol_txrx_ops *txrx_ops)
  3677. {
  3678. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3679. vdev->osif_vdev = osif_vdev;
  3680. vdev->ctrl_vdev = ctrl_vdev;
  3681. vdev->osif_rx = txrx_ops->rx.rx;
  3682. vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
  3683. vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
  3684. vdev->osif_get_key = txrx_ops->get_key;
  3685. vdev->osif_rx_mon = txrx_ops->rx.mon;
  3686. vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
  3687. #ifdef notyet
  3688. #if ATH_SUPPORT_WAPI
  3689. vdev->osif_check_wai = txrx_ops->rx.wai_check;
  3690. #endif
  3691. #endif
  3692. #ifdef UMAC_SUPPORT_PROXY_ARP
  3693. vdev->osif_proxy_arp = txrx_ops->proxy_arp;
  3694. #endif
  3695. vdev->me_convert = txrx_ops->me_convert;
  3696. /* TODO: Enable the following once Tx code is integrated */
  3697. if (vdev->mesh_vdev)
  3698. txrx_ops->tx.tx = dp_tx_send_mesh;
  3699. else
  3700. txrx_ops->tx.tx = dp_tx_send;
  3701. txrx_ops->tx.tx_exception = dp_tx_send_exception;
  3702. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  3703. "DP Vdev Register success");
  3704. }
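/*
 * Illustrative sketch (not part of the driver): how an OSIF layer might
 * populate ol_txrx_ops before calling dp_vdev_register_wifi3(). The handler
 * names osif_rx_deliver and osif_get_key are hypothetical; only the
 * ol_txrx_ops fields consumed above are assumed to exist.
 */
#if 0
static void osif_vdev_register_example(struct cdp_vdev *vdev_handle,
				       void *osif_vdev,
				       struct cdp_ctrl_objmgr_vdev *ctrl_vdev)
{
	struct ol_txrx_ops txrx_ops = {0};

	txrx_ops.rx.rx = osif_rx_deliver;	/* normal rx delivery path */
	txrx_ops.get_key = osif_get_key;	/* key lookup callback */
	dp_vdev_register_wifi3(vdev_handle, osif_vdev, ctrl_vdev, &txrx_ops);
	/*
	 * On return, txrx_ops.tx.tx points to dp_tx_send (or dp_tx_send_mesh
	 * for mesh vdevs) and txrx_ops.tx.tx_exception to
	 * dp_tx_send_exception, so the OSIF layer can transmit through them.
	 */
}
#endif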
  3705. /**
3706. * dp_vdev_flush_peers() - Forcibly flush peers of vdev
  3707. * @vdev: Datapath VDEV handle
  3708. *
  3709. * Return: void
  3710. */
  3711. static void dp_vdev_flush_peers(struct dp_vdev *vdev)
  3712. {
  3713. struct dp_pdev *pdev = vdev->pdev;
  3714. struct dp_soc *soc = pdev->soc;
  3715. struct dp_peer *peer;
  3716. uint16_t *peer_ids;
  3717. uint8_t i = 0, j = 0;
  3718. peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
  3719. if (!peer_ids) {
  3720. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3721. "DP alloc failure - unable to flush peers");
  3722. return;
  3723. }
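/*
 * Two-pass flush: snapshot the peer ids while holding peer_ref_mutex, then
 * issue the delete/unmap calls after dropping the lock, since
 * dp_peer_delete_wifi3() eventually takes peer_ref_mutex itself through
 * dp_peer_unref_delete().
 */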
  3724. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3725. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3726. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  3727. if (peer->peer_ids[i] != HTT_INVALID_PEER)
  3728. if (j < soc->max_peers)
  3729. peer_ids[j++] = peer->peer_ids[i];
  3730. }
  3731. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3732. for (i = 0; i < j ; i++) {
  3733. peer = dp_peer_find_by_id(soc, peer_ids[i]);
  3734. if (peer) {
3735. dp_info("peer: %pM is getting flushed",
  3736. peer->mac_addr.raw);
  3737. dp_peer_delete_wifi3(peer, 0);
  3738. /*
  3739. * we need to call dp_peer_unref_del_find_by_id()
  3740. * to remove additional ref count incremented
  3741. * by dp_peer_find_by_id() call.
  3742. *
  3743. * Hold the ref count while executing
  3744. * dp_peer_delete_wifi3() call.
  3745. *
  3746. */
  3747. dp_peer_unref_del_find_by_id(peer);
  3748. }
  3749. dp_rx_peer_unmap_handler(soc, peer_ids[i], vdev->vdev_id,
  3750. NULL, 0);
  3751. }
  3752. qdf_mem_free(peer_ids);
  3753. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3754. FL("Flushed peers for vdev object %pK "), vdev);
  3755. }
  3756. /*
  3757. * dp_vdev_detach_wifi3() - Detach txrx vdev
  3758. * @txrx_vdev: Datapath VDEV handle
  3759. * @callback: Callback OL_IF on completion of detach
  3760. * @cb_context: Callback context
  3761. *
  3762. */
  3763. static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
  3764. ol_txrx_vdev_delete_cb callback, void *cb_context)
  3765. {
  3766. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3767. struct dp_pdev *pdev = vdev->pdev;
  3768. struct dp_soc *soc = pdev->soc;
  3769. struct dp_neighbour_peer *peer = NULL;
  3770. struct dp_neighbour_peer *temp_peer = NULL;
  3771. /* preconditions */
  3772. qdf_assert(vdev);
  3773. if (wlan_op_mode_monitor == vdev->opmode)
  3774. goto free_vdev;
  3775. if (wlan_op_mode_sta == vdev->opmode)
  3776. dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
  3777. /*
3778. * If the target is hung, flush all peers before detaching the vdev;
3779. * this will free all references held due to missing
  3780. * unmap commands from Target
  3781. */
  3782. if ((hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET) ||
  3783. !hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
  3784. dp_vdev_flush_peers(vdev);
  3785. /*
  3786. * Use peer_ref_mutex while accessing peer_list, in case
  3787. * a peer is in the process of being removed from the list.
  3788. */
  3789. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3790. /* check that the vdev has no peers allocated */
  3791. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  3792. /* debug print - will be removed later */
  3793. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  3794. FL("not deleting vdev object %pK (%pM)"
  3795. "until deletion finishes for all its peers"),
  3796. vdev, vdev->mac_addr.raw);
  3797. /* indicate that the vdev needs to be deleted */
  3798. vdev->delete.pending = 1;
  3799. vdev->delete.callback = callback;
  3800. vdev->delete.context = cb_context;
  3801. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3802. return;
  3803. }
  3804. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3805. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  3806. if (!soc->hw_nac_monitor_support) {
  3807. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  3808. neighbour_peer_list_elem) {
  3809. QDF_ASSERT(peer->vdev != vdev);
  3810. }
  3811. } else {
  3812. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  3813. neighbour_peer_list_elem, temp_peer) {
  3814. if (peer->vdev == vdev) {
  3815. TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
  3816. neighbour_peer_list_elem);
  3817. qdf_mem_free(peer);
  3818. }
  3819. }
  3820. }
  3821. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  3822. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3823. dp_tx_vdev_detach(vdev);
  3824. /* remove the vdev from its parent pdev's list */
  3825. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  3826. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3827. FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
  3828. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3829. free_vdev:
  3830. qdf_mem_free(vdev);
  3831. if (callback)
  3832. callback(cb_context);
  3833. }
  3834. /*
  3835. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  3836. * @soc - datapath soc handle
  3837. * @peer - datapath peer handle
  3838. *
  3839. * Delete the AST entries belonging to a peer
  3840. */
  3841. #ifdef FEATURE_AST
  3842. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  3843. struct dp_peer *peer)
  3844. {
  3845. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  3846. qdf_spin_lock_bh(&soc->ast_lock);
  3847. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  3848. dp_peer_del_ast(soc, ast_entry);
  3849. peer->self_ast_entry = NULL;
  3850. TAILQ_INIT(&peer->ast_entry_list);
  3851. qdf_spin_unlock_bh(&soc->ast_lock);
  3852. }
  3853. #else
  3854. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  3855. struct dp_peer *peer)
  3856. {
  3857. }
  3858. #endif
  3859. #if ATH_SUPPORT_WRAP
  3860. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  3861. uint8_t *peer_mac_addr)
  3862. {
  3863. struct dp_peer *peer;
  3864. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  3865. 0, vdev->vdev_id);
  3866. if (!peer)
  3867. return NULL;
  3868. if (peer->bss_peer)
  3869. return peer;
  3870. dp_peer_unref_delete(peer);
  3871. return NULL;
  3872. }
  3873. #else
  3874. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  3875. uint8_t *peer_mac_addr)
  3876. {
  3877. struct dp_peer *peer;
  3878. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  3879. 0, vdev->vdev_id);
  3880. if (!peer)
  3881. return NULL;
  3882. if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
  3883. return peer;
  3884. dp_peer_unref_delete(peer);
  3885. return NULL;
  3886. }
  3887. #endif
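/*
 * Note on the two dp_peer_can_reuse() variants above: both look up the peer
 * by MAC through dp_peer_find_hash_find() and only consider bss peers for
 * reuse; the non-WRAP variant additionally requires the found peer's vdev_id
 * to match the requesting vdev before reusing it.
 */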
  3888. #if defined(FEATURE_AST)
  3889. #if !defined(AST_HKV1_WORKAROUND)
  3890. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  3891. uint8_t *peer_mac_addr)
  3892. {
  3893. struct dp_ast_entry *ast_entry;
  3894. qdf_spin_lock_bh(&soc->ast_lock);
  3895. ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
  3896. if (ast_entry && ast_entry->next_hop)
  3897. dp_peer_del_ast(soc, ast_entry);
  3898. qdf_spin_unlock_bh(&soc->ast_lock);
  3899. }
  3900. #else
  3901. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  3902. uint8_t *peer_mac_addr)
  3903. {
  3904. struct dp_ast_entry *ast_entry;
  3905. if (soc->ast_override_support) {
  3906. qdf_spin_lock_bh(&soc->ast_lock);
  3907. ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
  3908. if (ast_entry && ast_entry->next_hop)
  3909. dp_peer_del_ast(soc, ast_entry);
  3910. qdf_spin_unlock_bh(&soc->ast_lock);
  3911. }
  3912. }
  3913. #endif
  3914. #else
  3915. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  3916. uint8_t *peer_mac_addr)
  3917. {
  3918. }
  3919. #endif
  3920. /*
  3921. * dp_peer_create_wifi3() - attach txrx peer
  3922. * @txrx_vdev: Datapath VDEV handle
  3923. * @peer_mac_addr: Peer MAC address
  3924. *
3925. * Return: DP peer handle on success, NULL on failure
  3926. */
  3927. static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
  3928. uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
  3929. {
  3930. struct dp_peer *peer;
  3931. int i;
  3932. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3933. struct dp_pdev *pdev;
  3934. struct dp_soc *soc;
  3935. enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
  3936. /* preconditions */
  3937. qdf_assert(vdev);
  3938. qdf_assert(peer_mac_addr);
  3939. pdev = vdev->pdev;
  3940. soc = pdev->soc;
  3941. /*
3942. * If a peer entry with the given MAC address already exists,
3943. * reuse the peer and reset its state.
  3944. */
  3945. peer = dp_peer_can_reuse(vdev, peer_mac_addr);
  3946. if (peer) {
  3947. qdf_atomic_init(&peer->is_default_route_set);
  3948. dp_peer_cleanup(vdev, peer);
  3949. peer->delete_in_progress = false;
  3950. dp_peer_delete_ast_entries(soc, peer);
  3951. if ((vdev->opmode == wlan_op_mode_sta) &&
  3952. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  3953. DP_MAC_ADDR_LEN)) {
  3954. ast_type = CDP_TXRX_AST_TYPE_SELF;
  3955. }
  3956. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  3957. /*
  3958. * Control path maintains a node count which is incremented
  3959. * for every new peer create command. Since new peer is not being
  3960. * created and earlier reference is reused here,
  3961. * peer_unref_delete event is sent to control path to
  3962. * increment the count back.
  3963. */
  3964. if (soc->cdp_soc.ol_ops->peer_unref_delete) {
  3965. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
  3966. peer->mac_addr.raw, vdev->mac_addr.raw,
  3967. vdev->opmode);
  3968. }
  3969. peer->ctrl_peer = ctrl_peer;
  3970. dp_local_peer_id_alloc(pdev, peer);
  3971. DP_STATS_INIT(peer);
  3972. return (void *)peer;
  3973. } else {
  3974. /*
  3975. * When a STA roams from RPTR AP to ROOT AP and vice versa, we
  3976. * need to remove the AST entry which was earlier added as a WDS
  3977. * entry.
3978. * If an AST entry exists but no peer entry exists with the given
3979. * MAC address, we can deduce that it is a WDS entry.
  3980. */
  3981. dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
  3982. }
  3983. #ifdef notyet
  3984. peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
  3985. soc->mempool_ol_ath_peer);
  3986. #else
  3987. peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
  3988. #endif
  3989. if (!peer)
  3990. return NULL; /* failure */
  3991. qdf_mem_zero(peer, sizeof(struct dp_peer));
  3992. TAILQ_INIT(&peer->ast_entry_list);
  3993. /* store provided params */
  3994. peer->vdev = vdev;
  3995. peer->ctrl_peer = ctrl_peer;
  3996. if ((vdev->opmode == wlan_op_mode_sta) &&
  3997. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  3998. DP_MAC_ADDR_LEN)) {
  3999. ast_type = CDP_TXRX_AST_TYPE_SELF;
  4000. }
  4001. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  4002. qdf_spinlock_create(&peer->peer_info_lock);
  4003. qdf_mem_copy(
  4004. &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4005. /* TODO: See if rx_opt_proc is really required */
  4006. peer->rx_opt_proc = soc->rx_opt_proc;
  4007. /* initialize the peer_id */
  4008. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  4009. peer->peer_ids[i] = HTT_INVALID_PEER;
  4010. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4011. qdf_atomic_init(&peer->ref_cnt);
  4012. /* keep one reference for attach */
  4013. qdf_atomic_inc(&peer->ref_cnt);
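/*
 * Note: a second reference is added later when the PEER_MAP message arrives
 * from the target; both references are dropped through dp_peer_unref_delete()
 * (see dp_peer_delete_wifi3()) before the peer memory is finally released.
 */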
  4014. /* add this peer into the vdev's list */
  4015. if (wlan_op_mode_sta == vdev->opmode)
  4016. TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
  4017. else
  4018. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  4019. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4020. /* TODO: See if hash based search is required */
  4021. dp_peer_find_hash_add(soc, peer);
  4022. /* Initialize the peer state */
  4023. peer->state = OL_TXRX_PEER_STATE_DISC;
  4024. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4025. "vdev %pK created peer %pK (%pM) ref_cnt: %d",
  4026. vdev, peer, peer->mac_addr.raw,
  4027. qdf_atomic_read(&peer->ref_cnt));
  4028. /*
4029. * For every peer MAP message, check if this is the bss peer and set bss_peer accordingly
  4030. */
  4031. if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
  4032. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4033. "vdev bss_peer!!!!");
  4034. peer->bss_peer = 1;
  4035. vdev->vap_bss_peer = peer;
  4036. }
  4037. for (i = 0; i < DP_MAX_TIDS; i++)
  4038. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  4039. dp_local_peer_id_alloc(pdev, peer);
  4040. DP_STATS_INIT(peer);
  4041. return (void *)peer;
  4042. }
  4043. /*
  4044. * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
  4045. * @vdev: Datapath VDEV handle
  4046. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4047. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4048. *
  4049. * Return: None
  4050. */
  4051. static
  4052. void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
  4053. enum cdp_host_reo_dest_ring *reo_dest,
  4054. bool *hash_based)
  4055. {
  4056. struct dp_soc *soc;
  4057. struct dp_pdev *pdev;
  4058. pdev = vdev->pdev;
  4059. soc = pdev->soc;
  4060. /*
  4061. * hash based steering is disabled for Radios which are offloaded
  4062. * to NSS
  4063. */
  4064. if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
  4065. *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
  4066. /*
  4067. * Below line of code will ensure the proper reo_dest ring is chosen
  4068. * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
  4069. */
  4070. *reo_dest = pdev->reo_dest;
  4071. }
  4072. #ifdef IPA_OFFLOAD
  4073. /*
  4074. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4075. * @vdev: Datapath VDEV handle
  4076. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4077. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4078. *
  4079. * If IPA is enabled in ini, for SAP mode, disable hash based
  4080. * steering, use default reo_dst ring for RX. Use config values for other modes.
  4081. * Return: None
  4082. */
  4083. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4084. enum cdp_host_reo_dest_ring *reo_dest,
  4085. bool *hash_based)
  4086. {
  4087. struct dp_soc *soc;
  4088. struct dp_pdev *pdev;
  4089. pdev = vdev->pdev;
  4090. soc = pdev->soc;
  4091. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4092. /*
  4093. * If IPA is enabled, disable hash-based flow steering and set
  4094. * reo_dest_ring_4 as the REO ring to receive packets on.
  4095. * IPA is configured to reap reo_dest_ring_4.
  4096. *
  4097. * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
4098. * enum value is from 1 - 4.
  4099. * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
  4100. */
  4101. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  4102. if (vdev->opmode == wlan_op_mode_ap) {
  4103. *reo_dest = IPA_REO_DEST_RING_IDX + 1;
  4104. *hash_based = 0;
  4105. }
  4106. }
  4107. }
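/*
 * Worked example (values are illustrative): if IPA_REO_DEST_RING_IDX is 3,
 * *reo_dest becomes 4, i.e. the cdp_host_reo_dest_ring value that selects
 * reo_dest_ring_4, which is the ring IPA is configured to reap.
 */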
  4108. #else
  4109. /*
  4110. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4111. * @vdev: Datapath VDEV handle
  4112. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4113. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4114. *
  4115. * Use system config values for hash based steering.
  4116. * Return: None
  4117. */
  4118. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4119. enum cdp_host_reo_dest_ring *reo_dest,
  4120. bool *hash_based)
  4121. {
  4122. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4123. }
  4124. #endif /* IPA_OFFLOAD */
  4125. /*
  4126. * dp_peer_setup_wifi3() - initialize the peer
  4127. * @vdev_hdl: virtual device object
  4128. * @peer: Peer object
  4129. *
  4130. * Return: void
  4131. */
  4132. static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  4133. {
  4134. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  4135. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  4136. struct dp_pdev *pdev;
  4137. struct dp_soc *soc;
  4138. bool hash_based = 0;
  4139. enum cdp_host_reo_dest_ring reo_dest;
  4140. /* preconditions */
  4141. qdf_assert(vdev);
  4142. qdf_assert(peer);
  4143. pdev = vdev->pdev;
  4144. soc = pdev->soc;
  4145. peer->last_assoc_rcvd = 0;
  4146. peer->last_disassoc_rcvd = 0;
  4147. peer->last_deauth_rcvd = 0;
  4148. dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
  4149. dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
  4150. pdev->pdev_id, vdev->vdev_id,
  4151. vdev->opmode, hash_based, reo_dest);
  4152. /*
4153. * There are corner cases where AD1 = AD2 = "VAP's address",
4154. * i.e. both devices have the same MAC address. In these
4155. * cases we want such packets to be processed by the NULL Q handler,
4156. * which is the REO2TCL ring. For this reason we should
4157. * not set up reo_queues and the default route for the bss_peer.
  4158. */
  4159. if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
  4160. return;
  4161. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  4162. /* TODO: Check the destination ring number to be passed to FW */
  4163. soc->cdp_soc.ol_ops->peer_set_default_routing(
  4164. pdev->ctrl_pdev, peer->mac_addr.raw,
  4165. peer->vdev->vdev_id, hash_based, reo_dest);
  4166. }
  4167. qdf_atomic_set(&peer->is_default_route_set, 1);
  4168. dp_peer_rx_init(pdev, peer);
  4169. return;
  4170. }
  4171. /*
  4172. * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
  4173. * @vdev_handle: virtual device object
  4174. * @htt_pkt_type: type of pkt
  4175. *
  4176. * Return: void
  4177. */
  4178. static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
  4179. enum htt_cmn_pkt_type val)
  4180. {
  4181. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4182. vdev->tx_encap_type = val;
  4183. }
  4184. /*
  4185. * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
  4186. * @vdev_handle: virtual device object
  4187. * @htt_pkt_type: type of pkt
  4188. *
  4189. * Return: void
  4190. */
  4191. static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
  4192. enum htt_cmn_pkt_type val)
  4193. {
  4194. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4195. vdev->rx_decap_type = val;
  4196. }
  4197. /*
  4198. * dp_set_ba_aging_timeout() - set ba aging timeout per AC
  4199. * @txrx_soc: cdp soc handle
  4200. * @ac: Access category
  4201. * @value: timeout value in millisec
  4202. *
  4203. * Return: void
  4204. */
  4205. static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  4206. uint8_t ac, uint32_t value)
  4207. {
  4208. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  4209. hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
  4210. }
  4211. /*
  4212. * dp_get_ba_aging_timeout() - get ba aging timeout per AC
  4213. * @txrx_soc: cdp soc handle
  4214. * @ac: access category
  4215. * @value: timeout value in millisec
  4216. *
  4217. * Return: void
  4218. */
  4219. static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  4220. uint8_t ac, uint32_t *value)
  4221. {
  4222. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  4223. hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
  4224. }
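/*
 * Illustrative usage sketch (not part of the driver): reading the current
 * BA aging timeout for an access category and doubling it. The AC value 0
 * is used purely as an example index.
 */
#if 0
static void ba_aging_timeout_example(struct cdp_soc_t *txrx_soc)
{
	uint32_t timeout_ms = 0;

	dp_get_ba_aging_timeout(txrx_soc, 0, &timeout_ms);
	dp_set_ba_aging_timeout(txrx_soc, 0, timeout_ms * 2);
}
#endif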
  4225. /*
  4226. * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
  4227. * @pdev_handle: physical device object
  4228. * @val: reo destination ring index (1 - 4)
  4229. *
  4230. * Return: void
  4231. */
  4232. static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
  4233. enum cdp_host_reo_dest_ring val)
  4234. {
  4235. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4236. if (pdev)
  4237. pdev->reo_dest = val;
  4238. }
  4239. /*
  4240. * dp_get_pdev_reo_dest() - get the reo destination for this pdev
  4241. * @pdev_handle: physical device object
  4242. *
  4243. * Return: reo destination ring index
  4244. */
  4245. static enum cdp_host_reo_dest_ring
  4246. dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
  4247. {
  4248. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4249. if (pdev)
  4250. return pdev->reo_dest;
  4251. else
  4252. return cdp_host_reo_dest_ring_unknown;
  4253. }
  4254. /*
  4255. * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
  4256. * @pdev_handle: device object
  4257. * @val: value to be set
  4258. *
4259. * Return: 0
  4260. */
  4261. static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  4262. uint32_t val)
  4263. {
  4264. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4265. /* Enable/Disable smart mesh filtering. This flag will be checked
  4266. * during rx processing to check if packets are from NAC clients.
  4267. */
  4268. pdev->filter_neighbour_peers = val;
  4269. return 0;
  4270. }
  4271. /*
  4272. * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
  4273. * address for smart mesh filtering
  4274. * @vdev_handle: virtual device object
  4275. * @cmd: Add/Del command
  4276. * @macaddr: nac client mac address
  4277. *
4278. * Return: 1 on success, 0 on failure
  4279. */
  4280. static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
  4281. uint32_t cmd, uint8_t *macaddr)
  4282. {
  4283. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4284. struct dp_pdev *pdev = vdev->pdev;
  4285. struct dp_neighbour_peer *peer = NULL;
  4286. if (!macaddr)
  4287. goto fail0;
  4288. /* Store address of NAC (neighbour peer) which will be checked
  4289. * against TA of received packets.
  4290. */
  4291. if (cmd == DP_NAC_PARAM_ADD) {
  4292. peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
  4293. sizeof(*peer));
  4294. if (!peer) {
  4295. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4296. FL("DP neighbour peer node memory allocation failed"));
  4297. goto fail0;
  4298. }
  4299. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  4300. macaddr, DP_MAC_ADDR_LEN);
  4301. peer->vdev = vdev;
  4302. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4303. /* add this neighbour peer into the list */
  4304. TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
  4305. neighbour_peer_list_elem);
  4306. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4307. /* first neighbour */
  4308. if (!pdev->neighbour_peers_added) {
  4309. pdev->neighbour_peers_added = true;
  4310. dp_ppdu_ring_cfg(pdev);
  4311. }
  4312. return 1;
  4313. } else if (cmd == DP_NAC_PARAM_DEL) {
  4314. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4315. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  4316. neighbour_peer_list_elem) {
  4317. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  4318. macaddr, DP_MAC_ADDR_LEN)) {
  4319. /* delete this peer from the list */
  4320. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  4321. peer, neighbour_peer_list_elem);
  4322. qdf_mem_free(peer);
  4323. break;
  4324. }
  4325. }
  4326. /* last neighbour deleted */
  4327. if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
  4328. pdev->neighbour_peers_added = false;
  4329. dp_ppdu_ring_cfg(pdev);
  4330. }
  4331. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4332. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  4333. !pdev->enhanced_stats_en)
  4334. dp_ppdu_ring_reset(pdev);
  4335. return 1;
  4336. }
  4337. fail0:
  4338. return 0;
  4339. }
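/*
 * Illustrative usage sketch (not part of the driver): enabling smart mesh
 * filtering on the pdev and registering one NAC client address on a vdev.
 * The MAC address below is a placeholder.
 */
#if 0
static void nac_filter_example(struct cdp_pdev *pdev_handle,
			       struct cdp_vdev *vdev_handle)
{
	uint8_t nac_mac[DP_MAC_ADDR_LEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};

	dp_set_filter_neighbour_peers(pdev_handle, 1);
	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD, nac_mac);
	/* ... later, remove the client again ... */
	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_DEL, nac_mac);
}
#endif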
  4340. /*
  4341. * dp_get_sec_type() - Get the security type
  4342. * @peer: Datapath peer handle
  4343. * @sec_idx: Security id (mcast, ucast)
  4344. *
  4345. * return sec_type: Security type
  4346. */
  4347. static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
  4348. {
  4349. struct dp_peer *dpeer = (struct dp_peer *)peer;
  4350. return dpeer->security[sec_idx].sec_type;
  4351. }
  4352. /*
  4353. * dp_peer_authorize() - authorize txrx peer
  4354. * @peer_handle: Datapath peer handle
4355. * @authorize: authorize flag (1 to authorize the peer, 0 otherwise)
  4356. *
  4357. */
  4358. static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
  4359. {
  4360. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4361. struct dp_soc *soc;
  4362. if (peer != NULL) {
  4363. soc = peer->vdev->pdev->soc;
  4364. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4365. peer->authorize = authorize ? 1 : 0;
  4366. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4367. }
  4368. }
  4369. static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
  4370. struct dp_pdev *pdev,
  4371. struct dp_peer *peer,
  4372. uint32_t vdev_id)
  4373. {
  4374. struct dp_vdev *vdev = NULL;
  4375. struct dp_peer *bss_peer = NULL;
  4376. uint8_t *m_addr = NULL;
  4377. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4378. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  4379. if (vdev->vdev_id == vdev_id)
  4380. break;
  4381. }
  4382. if (!vdev) {
  4383. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4384. "vdev is NULL");
  4385. } else {
  4386. if (vdev->vap_bss_peer == peer)
  4387. vdev->vap_bss_peer = NULL;
  4388. m_addr = peer->mac_addr.raw;
  4389. if (soc->cdp_soc.ol_ops->peer_unref_delete)
  4390. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
  4391. m_addr, vdev->mac_addr.raw, vdev->opmode);
  4392. if (vdev && vdev->vap_bss_peer) {
  4393. bss_peer = vdev->vap_bss_peer;
  4394. DP_UPDATE_STATS(vdev, peer);
  4395. }
  4396. }
  4397. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4398. qdf_mem_free(peer);
  4399. }
  4400. /**
  4401. * dp_delete_pending_vdev() - check and process vdev delete
  4402. * @pdev: DP specific pdev pointer
  4403. * @vdev: DP specific vdev pointer
  4404. * @vdev_id: vdev id corresponding to vdev
  4405. *
  4406. * This API does following:
4407. * 1) It releases tx flow pool buffers as the vdev is
  4408. * going down and no peers are associated.
  4409. * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
  4410. */
  4411. static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
  4412. uint8_t vdev_id)
  4413. {
  4414. ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
  4415. void *vdev_delete_context = NULL;
  4416. vdev_delete_cb = vdev->delete.callback;
  4417. vdev_delete_context = vdev->delete.context;
  4418. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4419. FL("deleting vdev object %pK (%pM)- its last peer is done"),
  4420. vdev, vdev->mac_addr.raw);
  4421. /* all peers are gone, go ahead and delete it */
  4422. dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
  4423. FLOW_TYPE_VDEV, vdev_id);
  4424. dp_tx_vdev_detach(vdev);
  4425. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4426. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  4427. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4428. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4429. FL("deleting vdev object %pK (%pM)"),
  4430. vdev, vdev->mac_addr.raw);
  4431. qdf_mem_free(vdev);
  4432. vdev = NULL;
  4433. if (vdev_delete_cb)
  4434. vdev_delete_cb(vdev_delete_context);
  4435. }
  4436. /*
  4437. * dp_peer_unref_delete() - unref and delete peer
  4438. * @peer_handle: Datapath peer handle
  4439. *
  4440. */
  4441. void dp_peer_unref_delete(void *peer_handle)
  4442. {
  4443. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4444. struct dp_vdev *vdev = peer->vdev;
  4445. struct dp_pdev *pdev = vdev->pdev;
  4446. struct dp_soc *soc = pdev->soc;
  4447. struct dp_peer *tmppeer;
  4448. int found = 0;
  4449. uint16_t peer_id;
  4450. uint16_t vdev_id;
  4451. bool delete_vdev;
  4452. /*
  4453. * Hold the lock all the way from checking if the peer ref count
  4454. * is zero until the peer references are removed from the hash
  4455. * table and vdev list (if the peer ref count is zero).
  4456. * This protects against a new HL tx operation starting to use the
  4457. * peer object just after this function concludes it's done being used.
  4458. * Furthermore, the lock needs to be held while checking whether the
  4459. * vdev's list of peers is empty, to make sure that list is not modified
  4460. * concurrently with the empty check.
  4461. */
  4462. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4463. if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
  4464. peer_id = peer->peer_ids[0];
  4465. vdev_id = vdev->vdev_id;
  4466. /*
  4467. * Make sure that the reference to the peer in
  4468. * peer object map is removed
  4469. */
  4470. if (peer_id != HTT_INVALID_PEER)
  4471. soc->peer_id_to_obj_map[peer_id] = NULL;
  4472. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4473. "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
  4474. /* remove the reference to the peer from the hash table */
  4475. dp_peer_find_hash_remove(soc, peer);
  4476. qdf_spin_lock_bh(&soc->ast_lock);
  4477. if (peer->self_ast_entry) {
  4478. dp_peer_del_ast(soc, peer->self_ast_entry);
  4479. peer->self_ast_entry = NULL;
  4480. }
  4481. qdf_spin_unlock_bh(&soc->ast_lock);
  4482. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  4483. if (tmppeer == peer) {
  4484. found = 1;
  4485. break;
  4486. }
  4487. }
  4488. if (found) {
  4489. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  4490. peer_list_elem);
  4491. } else {
  4492. /*Ignoring the remove operation as peer not found*/
  4493. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4494. "peer:%pK not found in vdev:%pK peerlist:%pK",
  4495. peer, vdev, &peer->vdev->peer_list);
  4496. }
  4497. /* cleanup the peer data */
  4498. dp_peer_cleanup(vdev, peer);
  4499. /* check whether the parent vdev has no peers left */
  4500. if (TAILQ_EMPTY(&vdev->peer_list)) {
  4501. /*
  4502. * capture vdev delete pending flag's status
  4503. * while holding peer_ref_mutex lock
  4504. */
  4505. delete_vdev = vdev->delete.pending;
  4506. /*
  4507. * Now that there are no references to the peer, we can
  4508. * release the peer reference lock.
  4509. */
  4510. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4511. /*
  4512. * Check if the parent vdev was waiting for its peers
  4513. * to be deleted, in order for it to be deleted too.
  4514. */
  4515. if (delete_vdev)
  4516. dp_delete_pending_vdev(pdev, vdev, vdev_id);
  4517. } else {
  4518. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4519. }
  4520. dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
  4521. } else {
  4522. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4523. }
  4524. }
  4525. /*
4526. * dp_peer_delete_wifi3() - Delete txrx peer
  4527. * @peer_handle: Datapath peer handle
  4528. * @bitmap: bitmap indicating special handling of request.
  4529. *
  4530. */
  4531. static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
  4532. {
  4533. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4534. /* redirect the peer's rx delivery function to point to a
  4535. * discard func
  4536. */
  4537. peer->rx_opt_proc = dp_rx_discard;
  4538. peer->ctrl_peer = NULL;
  4539. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4540. FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);
  4541. dp_local_peer_id_free(peer->vdev->pdev, peer);
  4542. qdf_spinlock_destroy(&peer->peer_info_lock);
  4543. /*
  4544. * Remove the reference added during peer_attach.
  4545. * The peer will still be left allocated until the
  4546. * PEER_UNMAP message arrives to remove the other
  4547. * reference, added by the PEER_MAP message.
  4548. */
  4549. dp_peer_unref_delete(peer_handle);
  4550. }
  4551. /*
4552. * dp_get_vdev_mac_addr_wifi3() - Get MAC address of the vdev
4553. * @pvdev: Datapath VDEV handle
4554. * Return: pointer to the vdev's raw MAC address
  4555. */
  4556. static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
  4557. {
  4558. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  4559. return vdev->mac_addr.raw;
  4560. }
  4561. /*
4562. * dp_vdev_set_wds() - Enable/disable WDS on the vdev
4563. * @vdev_handle: DP VDEV handle
4564. * @val: WDS enable/disable value
  4565. *
  4566. * Return: none
  4567. */
  4568. static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
  4569. {
  4570. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4571. vdev->wds_enabled = val;
  4572. return 0;
  4573. }
  4574. /*
4575. * dp_get_vdev_from_vdev_id_wifi3() - Get vdev handle from vdev id
4576. * @dev: Datapath PDEV handle
4577. * @vdev_id: vdev id
  4578. */
  4579. static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
  4580. uint8_t vdev_id)
  4581. {
  4582. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  4583. struct dp_vdev *vdev = NULL;
  4584. if (qdf_unlikely(!pdev))
  4585. return NULL;
  4586. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4587. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  4588. if (vdev->vdev_id == vdev_id)
  4589. break;
  4590. }
  4591. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4592. return (struct cdp_vdev *)vdev;
  4593. }
  4594. /*
  4595. * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
  4596. * @dev: PDEV handle
  4597. *
  4598. * Return: VDEV handle of monitor mode
  4599. */
  4600. static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
  4601. {
  4602. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  4603. if (qdf_unlikely(!pdev))
  4604. return NULL;
  4605. return (struct cdp_vdev *)pdev->monitor_vdev;
  4606. }
  4607. static int dp_get_opmode(struct cdp_vdev *vdev_handle)
  4608. {
  4609. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4610. return vdev->opmode;
  4611. }
  4612. static
  4613. void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
  4614. ol_txrx_rx_fp *stack_fn_p,
  4615. ol_osif_vdev_handle *osif_vdev_p)
  4616. {
  4617. struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
  4618. qdf_assert(vdev);
  4619. *stack_fn_p = vdev->osif_rx_stack;
  4620. *osif_vdev_p = vdev->osif_vdev;
  4621. }
  4622. static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
  4623. {
  4624. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  4625. struct dp_pdev *pdev = vdev->pdev;
  4626. return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
  4627. }
  4628. /**
  4629. * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
  4630. * ring based on target
  4631. * @soc: soc handle
  4632. * @mac_for_pdev: pdev_id
  4633. * @pdev: physical device handle
  4634. * @ring_num: mac id
  4635. * @htt_tlv_filter: tlv filter
  4636. *
  4637. * Return: zero on success, non-zero on failure
  4638. */
  4639. static inline
  4640. QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
  4641. struct dp_pdev *pdev, uint8_t ring_num,
  4642. struct htt_rx_ring_tlv_filter htt_tlv_filter)
  4643. {
  4644. QDF_STATUS status;
  4645. if (soc->wlan_cfg_ctx->rxdma1_enable)
  4646. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4647. pdev->rxdma_mon_buf_ring[ring_num]
  4648. .hal_srng,
  4649. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
  4650. &htt_tlv_filter);
  4651. else
  4652. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4653. pdev->rx_mac_buf_ring[ring_num]
  4654. .hal_srng,
  4655. RXDMA_BUF, RX_BUFFER_SIZE,
  4656. &htt_tlv_filter);
  4657. return status;
  4658. }
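/*
 * Note: dp_monitor_mode_ring_config() above picks the destination of the
 * HTT rx-ring config message based on target capability - with
 * rxdma1_enable the filter is programmed on the dedicated monitor buffer
 * ring (RXDMA_MONITOR_BUF); otherwise it goes to the per-MAC
 * rx_mac_buf_ring (RXDMA_BUF), presumably because such targets have no
 * separate monitor RXDMA.
 */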
  4659. /**
  4660. * dp_reset_monitor_mode() - Disable monitor mode
  4661. * @pdev_handle: Datapath PDEV handle
  4662. *
  4663. * Return: 0 on success, not 0 on failure
  4664. */
  4665. static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
  4666. {
  4667. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4668. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4669. struct dp_soc *soc = pdev->soc;
  4670. uint8_t pdev_id;
  4671. int mac_id;
  4672. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4673. pdev_id = pdev->pdev_id;
  4674. soc = pdev->soc;
  4675. qdf_spin_lock_bh(&pdev->mon_lock);
  4676. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4677. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4678. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4679. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4680. pdev, mac_id,
  4681. htt_tlv_filter);
  4682. if (status != QDF_STATUS_SUCCESS) {
  4683. dp_err("Failed to send tlv filter for monitor mode rings");
  4684. return status;
  4685. }
  4686. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4687. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4688. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
  4689. &htt_tlv_filter);
  4690. }
  4691. pdev->monitor_vdev = NULL;
  4692. pdev->mcopy_mode = 0;
  4693. qdf_spin_unlock_bh(&pdev->mon_lock);
  4694. return QDF_STATUS_SUCCESS;
  4695. }
  4696. /**
  4697. * dp_set_nac() - set peer_nac
  4698. * @peer_handle: Datapath PEER handle
  4699. *
  4700. * Return: void
  4701. */
  4702. static void dp_set_nac(struct cdp_peer *peer_handle)
  4703. {
  4704. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4705. peer->nac = 1;
  4706. }
  4707. /**
  4708. * dp_get_tx_pending() - read pending tx
  4709. * @pdev_handle: Datapath PDEV handle
  4710. *
  4711. * Return: outstanding tx
  4712. */
  4713. static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
  4714. {
  4715. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4716. return qdf_atomic_read(&pdev->num_tx_outstanding);
  4717. }
  4718. /**
  4719. * dp_get_peer_mac_from_peer_id() - get peer mac
  4720. * @pdev_handle: Datapath PDEV handle
  4721. * @peer_id: Peer ID
  4722. * @peer_mac: MAC addr of PEER
  4723. *
  4724. * Return: void
  4725. */
  4726. static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
  4727. uint32_t peer_id, uint8_t *peer_mac)
  4728. {
  4729. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4730. struct dp_peer *peer;
  4731. if (pdev && peer_mac) {
  4732. peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
  4733. if (peer) {
  4734. qdf_mem_copy(peer_mac, peer->mac_addr.raw,
  4735. DP_MAC_ADDR_LEN);
  4736. dp_peer_unref_del_find_by_id(peer);
  4737. }
  4738. }
  4739. }
  4740. /**
  4741. * dp_pdev_configure_monitor_rings() - configure monitor rings
4742. * @pdev: Datapath PDEV handle
4743. *
4744. * Return: QDF_STATUS_SUCCESS on success, error code on failure
  4745. */
  4746. static QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
  4747. {
  4748. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4749. struct dp_soc *soc;
  4750. uint8_t pdev_id;
  4751. int mac_id;
  4752. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4753. pdev_id = pdev->pdev_id;
  4754. soc = pdev->soc;
  4755. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  4756. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  4757. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  4758. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  4759. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  4760. pdev->mo_data_filter);
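/*
 * First pass: program the monitor buffer/destination rings with per-MSDU
 * TLVs (mpdu/msdu start and end, packet, attention) subject to the
 * configured FP/MO filters.
 */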
  4761. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4762. htt_tlv_filter.mpdu_start = 1;
  4763. htt_tlv_filter.msdu_start = 1;
  4764. htt_tlv_filter.packet = 1;
  4765. htt_tlv_filter.msdu_end = 1;
  4766. htt_tlv_filter.mpdu_end = 1;
  4767. htt_tlv_filter.packet_header = 1;
  4768. htt_tlv_filter.attention = 1;
  4769. htt_tlv_filter.ppdu_start = 0;
  4770. htt_tlv_filter.ppdu_end = 0;
  4771. htt_tlv_filter.ppdu_end_user_stats = 0;
  4772. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  4773. htt_tlv_filter.ppdu_end_status_done = 0;
  4774. htt_tlv_filter.header_per_msdu = 1;
  4775. htt_tlv_filter.enable_fp =
  4776. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  4777. htt_tlv_filter.enable_md = 0;
  4778. htt_tlv_filter.enable_mo =
  4779. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  4780. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  4781. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  4782. if (pdev->mcopy_mode)
  4783. htt_tlv_filter.fp_data_filter = 0;
  4784. else
  4785. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  4786. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  4787. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  4788. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  4789. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4790. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4791. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4792. pdev, mac_id,
  4793. htt_tlv_filter);
  4794. if (status != QDF_STATUS_SUCCESS) {
  4795. dp_err("Failed to send tlv filter for monitor mode rings");
  4796. return status;
  4797. }
  4798. }
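/*
 * Second pass: program the status rings with PPDU-level TLVs (ppdu
 * start/end and user stats) using the ALL filters, so PPDU status delivery
 * is not gated by the configured data filters.
 */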
  4799. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4800. htt_tlv_filter.mpdu_start = 1;
  4801. htt_tlv_filter.msdu_start = 0;
  4802. htt_tlv_filter.packet = 0;
  4803. htt_tlv_filter.msdu_end = 0;
  4804. htt_tlv_filter.mpdu_end = 0;
  4805. htt_tlv_filter.attention = 0;
  4806. htt_tlv_filter.ppdu_start = 1;
  4807. htt_tlv_filter.ppdu_end = 1;
  4808. htt_tlv_filter.ppdu_end_user_stats = 1;
  4809. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  4810. htt_tlv_filter.ppdu_end_status_done = 1;
  4811. htt_tlv_filter.enable_fp = 1;
  4812. htt_tlv_filter.enable_md = 0;
  4813. htt_tlv_filter.enable_mo = 1;
  4814. if (pdev->mcopy_mode) {
  4815. htt_tlv_filter.packet_header = 1;
  4816. }
  4817. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  4818. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  4819. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  4820. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  4821. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  4822. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  4823. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4824. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  4825. pdev->pdev_id);
  4826. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4827. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4828. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  4829. }
  4830. return status;
  4831. }
  4832. /**
  4833. * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
  4834. * @vdev_handle: Datapath VDEV handle
4835. * @smart_monitor: Flag to denote if it is smart monitor mode
  4836. *
  4837. * Return: 0 on success, not 0 on failure
  4838. */
  4839. static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
  4840. uint8_t smart_monitor)
  4841. {
  4842. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4843. struct dp_pdev *pdev;
  4844. qdf_assert(vdev);
  4845. pdev = vdev->pdev;
  4846. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  4847. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
  4848. pdev, pdev->pdev_id, pdev->soc, vdev);
  4849. /*Check if current pdev's monitor_vdev exists */
  4850. if (pdev->monitor_vdev || pdev->mcopy_mode) {
  4851. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  4852. "monitor vap already created vdev=%pK\n", vdev);
  4853. qdf_assert(vdev);
  4854. return QDF_STATUS_E_RESOURCES;
  4855. }
  4856. pdev->monitor_vdev = vdev;
  4857. /* If smart monitor mode, do not configure monitor ring */
  4858. if (smart_monitor)
  4859. return QDF_STATUS_SUCCESS;
  4860. return dp_pdev_configure_monitor_rings(pdev);
  4861. }
  4862. /**
  4863. * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
  4864. * @pdev_handle: Datapath PDEV handle
  4865. * @filter_val: Flag to select Filter for monitor mode
  4866. * Return: 0 on success, not 0 on failure
  4867. */
  4868. static QDF_STATUS
  4869. dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
  4870. struct cdp_monitor_filter *filter_val)
  4871. {
4872. /* Many monitor VAPs can exist in a system, but only one can be up at
4873. * any time
  4874. */
  4875. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4876. struct dp_vdev *vdev = pdev->monitor_vdev;
  4877. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4878. struct dp_soc *soc;
  4879. uint8_t pdev_id;
  4880. int mac_id;
  4881. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4882. pdev_id = pdev->pdev_id;
  4883. soc = pdev->soc;
  4884. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  4885. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
  4886. pdev, pdev_id, soc, vdev);
  4887. /*Check if current pdev's monitor_vdev exists */
  4888. if (!pdev->monitor_vdev) {
  4889. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  4890. "vdev=%pK", vdev);
  4891. qdf_assert(vdev);
  4892. }
  4893. /* update filter mode, type in pdev structure */
  4894. pdev->mon_filter_mode = filter_val->mode;
  4895. pdev->fp_mgmt_filter = filter_val->fp_mgmt;
  4896. pdev->fp_ctrl_filter = filter_val->fp_ctrl;
  4897. pdev->fp_data_filter = filter_val->fp_data;
  4898. pdev->mo_mgmt_filter = filter_val->mo_mgmt;
  4899. pdev->mo_ctrl_filter = filter_val->mo_ctrl;
  4900. pdev->mo_data_filter = filter_val->mo_data;
  4901. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  4902. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  4903. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  4904. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  4905. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  4906. pdev->mo_data_filter);
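/*
 * Clear the currently programmed filter on the monitor and status rings
 * (all-zero TLV filter) before applying the newly requested filter below.
 */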
  4907. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4908. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4909. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4910. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4911. pdev, mac_id,
  4912. htt_tlv_filter);
  4913. if (status != QDF_STATUS_SUCCESS) {
  4914. dp_err("Failed to send tlv filter for monitor mode rings");
  4915. return status;
  4916. }
  4917. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4918. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4919. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  4920. }
  4921. htt_tlv_filter.mpdu_start = 1;
  4922. htt_tlv_filter.msdu_start = 1;
  4923. htt_tlv_filter.packet = 1;
  4924. htt_tlv_filter.msdu_end = 1;
  4925. htt_tlv_filter.mpdu_end = 1;
  4926. htt_tlv_filter.packet_header = 1;
  4927. htt_tlv_filter.attention = 1;
  4928. htt_tlv_filter.ppdu_start = 0;
  4929. htt_tlv_filter.ppdu_end = 0;
  4930. htt_tlv_filter.ppdu_end_user_stats = 0;
  4931. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  4932. htt_tlv_filter.ppdu_end_status_done = 0;
  4933. htt_tlv_filter.header_per_msdu = 1;
  4934. htt_tlv_filter.enable_fp =
  4935. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  4936. htt_tlv_filter.enable_md = 0;
  4937. htt_tlv_filter.enable_mo =
  4938. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  4939. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  4940. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  4941. if (pdev->mcopy_mode)
  4942. htt_tlv_filter.fp_data_filter = 0;
  4943. else
  4944. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  4945. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  4946. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  4947. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  4948. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4949. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4950. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4951. pdev, mac_id,
  4952. htt_tlv_filter);
  4953. if (status != QDF_STATUS_SUCCESS) {
  4954. dp_err("Failed to send tlv filter for monitor mode rings");
  4955. return status;
  4956. }
  4957. }
  4958. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4959. htt_tlv_filter.mpdu_start = 1;
  4960. htt_tlv_filter.msdu_start = 0;
  4961. htt_tlv_filter.packet = 0;
  4962. htt_tlv_filter.msdu_end = 0;
  4963. htt_tlv_filter.mpdu_end = 0;
  4964. htt_tlv_filter.attention = 0;
  4965. htt_tlv_filter.ppdu_start = 1;
  4966. htt_tlv_filter.ppdu_end = 1;
  4967. htt_tlv_filter.ppdu_end_user_stats = 1;
  4968. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  4969. htt_tlv_filter.ppdu_end_status_done = 1;
  4970. htt_tlv_filter.enable_fp = 1;
  4971. htt_tlv_filter.enable_md = 0;
  4972. htt_tlv_filter.enable_mo = 1;
  4973. if (pdev->mcopy_mode) {
  4974. htt_tlv_filter.packet_header = 1;
  4975. }
  4976. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  4977. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  4978. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  4979. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  4980. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  4981. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  4982. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4983. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  4984. pdev->pdev_id);
  4985. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4986. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4987. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  4988. }
  4989. return QDF_STATUS_SUCCESS;
  4990. }
  4991. /**
  4992. * dp_get_pdev_id_frm_pdev() - get pdev_id
  4993. * @pdev_handle: Datapath PDEV handle
  4994. *
  4995. * Return: pdev_id
  4996. */
  4997. static
  4998. uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
  4999. {
  5000. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5001. return pdev->pdev_id;
  5002. }
  5003. /**
  5004. * dp_pdev_set_chan_noise_floor() - set channel noise floor
  5005. * @pdev_handle: Datapath PDEV handle
  5006. * @chan_noise_floor: Channel Noise Floor
  5007. *
  5008. * Return: void
  5009. */
  5010. static
  5011. void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
  5012. int16_t chan_noise_floor)
  5013. {
  5014. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5015. pdev->chan_noise_floor = chan_noise_floor;
  5016. }
  5017. /**
  5018. * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
  5019. * @vdev_handle: Datapath VDEV handle
  5020. * Return: true on ucast filter flag set
  5021. */
  5022. static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
  5023. {
  5024. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5025. struct dp_pdev *pdev;
  5026. pdev = vdev->pdev;
  5027. if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
  5028. (pdev->mo_data_filter & FILTER_DATA_UCAST))
  5029. return true;
  5030. return false;
  5031. }
  5032. /**
  5033. * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
  5034. * @vdev_handle: Datapath VDEV handle
  5035. * Return: true on mcast filter flag set
  5036. */
  5037. static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
  5038. {
  5039. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5040. struct dp_pdev *pdev;
  5041. pdev = vdev->pdev;
  5042. if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
  5043. (pdev->mo_data_filter & FILTER_DATA_MCAST))
  5044. return true;
  5045. return false;
  5046. }
  5047. /**
  5048. * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
  5049. * @vdev_handle: Datapath VDEV handle
  5050. * Return: true on non data filter flag set
  5051. */
  5052. static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
  5053. {
  5054. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5055. struct dp_pdev *pdev;
  5056. pdev = vdev->pdev;
  5057. if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
  5058. (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
  5059. if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
  5060. (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
  5061. return true;
  5062. }
  5063. }
  5064. return false;
  5065. }
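/*
 * Note on the three filter getters above: they are per-vdev entry points, but
 * the monitor filter configuration itself lives on the parent pdev
 * (fp_*_filter / mo_*_filter), so every vdev under the same pdev reports the
 * same answer.
 */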
  5066. #ifdef MESH_MODE_SUPPORT
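/*
 * dp_peer_set_mesh_mode() - flag/unflag a vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */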
  5067. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  5068. {
  5069. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5070. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5071. FL("val %d"), val);
  5072. vdev->mesh_vdev = val;
  5073. }
  5074. /*
  5075. * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
  5076. * @vdev_hdl: virtual device object
  5077. * @val: value to be set
  5078. *
  5079. * Return: void
  5080. */
  5081. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  5082. {
  5083. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5084. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5085. FL("val %d"), val);
  5086. vdev->mesh_rx_filter = val;
  5087. }
  5088. #endif
  5089. /*
5090. * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
  5091. * Current scope is bar received count
  5092. *
  5093. * @pdev_handle: DP_PDEV handle
  5094. *
  5095. * Return: void
  5096. */
  5097. #define STATS_PROC_TIMEOUT (HZ/1000)
  5098. static void
  5099. dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
  5100. {
  5101. struct dp_vdev *vdev;
  5102. struct dp_peer *peer;
  5103. uint32_t waitcnt;
  5104. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5105. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5106. if (!peer) {
  5107. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5108. FL("DP Invalid Peer reference"));
  5109. return;
  5110. }
  5111. if (peer->delete_in_progress) {
  5112. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5113. FL("DP Peer deletion in progress"));
  5114. continue;
  5115. }
  5116. qdf_atomic_inc(&peer->ref_cnt);
  5117. waitcnt = 0;
  5118. dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
  5119. while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
  5120. && waitcnt < 10) {
  5121. schedule_timeout_interruptible(
  5122. STATS_PROC_TIMEOUT);
  5123. waitcnt++;
  5124. }
  5125. qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
  5126. dp_peer_unref_delete(peer);
  5127. }
  5128. }
  5129. }
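/*
 * Note on the flow above: dp_peer_rxtid_stats() issues an asynchronous REO
 * queue-stats command per peer with dp_rx_bar_stats_cb() (defined below) as
 * the completion callback. The callback accumulates bar_rcvd_cnt into
 * pdev->stats.rx.bar_recv_cnt and sets pdev->stats_cmd_complete, which the
 * polling loop above waits on (bounded to roughly 10 * STATS_PROC_TIMEOUT).
 */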
  5130. /**
  5131. * dp_rx_bar_stats_cb(): BAR received stats callback
  5132. * @soc: SOC handle
  5133. * @cb_ctxt: Call back context
  5134. * @reo_status: Reo status
  5135. *
  5136. * return: void
  5137. */
  5138. void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  5139. union hal_reo_status *reo_status)
  5140. {
  5141. struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
  5142. struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
  5143. if (!qdf_atomic_read(&soc->cmn_init_done))
  5144. return;
  5145. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
  5146. DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
  5147. queue_status->header.status);
  5148. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  5149. return;
  5150. }
  5151. pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
  5152. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  5153. }
  5154. /**
  5155. * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5156. * @vdev: DP VDEV handle
5157. * @vdev_stats: buffer to hold the consolidated vdev stats
  5158. * return: void
  5159. */
  5160. void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
  5161. struct cdp_vdev_stats *vdev_stats)
  5162. {
  5163. struct dp_peer *peer = NULL;
  5164. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  5165. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
  5166. dp_update_vdev_stats(vdev_stats, peer);
  5167. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5168. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5169. vdev_stats, vdev->vdev_id,
  5170. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  5171. #endif
  5172. }
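/*
 * Illustrative caller pattern for dp_aggregate_vdev_stats() (a sketch only;
 * dp_vdev_getstats() below follows the same shape). The caller owns the
 * cdp_vdev_stats buffer:
 *
 *	struct cdp_vdev_stats *stats =
 *		qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
 *
 *	if (!stats)
 *		return;
 *	dp_aggregate_vdev_stats(vdev, stats);
 *	// consume stats->tx_i / stats->rx fields here
 *	qdf_mem_free(stats);
 */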
  5173. /**
  5174. * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
  5175. * @pdev: DP PDEV handle
  5176. *
  5177. * return: void
  5178. */
  5179. static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
  5180. {
  5181. struct dp_vdev *vdev = NULL;
  5182. struct cdp_vdev_stats *vdev_stats =
  5183. qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  5184. if (!vdev_stats) {
  5185. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5186. "DP alloc failure - unable to allocate vdev stats");
  5187. return;
  5188. }
  5189. qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
  5190. qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
  5191. qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
  5192. if (pdev->mcopy_mode)
  5193. DP_UPDATE_STATS(pdev, pdev->invalid_peer);
  5194. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  5195. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5196. dp_aggregate_vdev_stats(vdev, vdev_stats);
  5197. dp_update_pdev_stats(pdev, vdev_stats);
  5198. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
  5199. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
  5200. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
  5201. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
  5202. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
  5203. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
  5204. DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
  5205. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
  5206. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
  5207. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
  5208. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
  5209. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
  5210. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
  5211. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
  5212. DP_STATS_AGGR(pdev, vdev,
  5213. tx_i.mcast_en.dropped_map_error);
  5214. DP_STATS_AGGR(pdev, vdev,
  5215. tx_i.mcast_en.dropped_self_mac);
  5216. DP_STATS_AGGR(pdev, vdev,
  5217. tx_i.mcast_en.dropped_send_fail);
  5218. DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
  5219. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
  5220. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
  5221. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
  5222. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
  5223. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
  5224. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.headroom_insufficient);
  5225. DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
  5226. DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
  5227. DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
  5228. DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
  5229. pdev->stats.tx_i.dropped.dropped_pkt.num =
  5230. pdev->stats.tx_i.dropped.dma_error +
  5231. pdev->stats.tx_i.dropped.ring_full +
  5232. pdev->stats.tx_i.dropped.enqueue_fail +
  5233. pdev->stats.tx_i.dropped.desc_na.num +
  5234. pdev->stats.tx_i.dropped.res_full;
  5235. pdev->stats.tx.last_ack_rssi =
  5236. vdev->stats.tx.last_ack_rssi;
  5237. pdev->stats.tx_i.tso.num_seg =
  5238. vdev->stats.tx_i.tso.num_seg;
  5239. }
  5240. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  5241. qdf_mem_free(vdev_stats);
  5242. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5243. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
  5244. pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
  5245. #endif
  5246. }
  5247. /**
  5248. * dp_vdev_getstats() - get vdev packet level stats
  5249. * @vdev_handle: Datapath VDEV handle
  5250. * @stats: cdp network device stats structure
  5251. *
  5252. * Return: void
  5253. */
  5254. static void dp_vdev_getstats(void *vdev_handle,
  5255. struct cdp_dev_stats *stats)
  5256. {
  5257. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5258. struct cdp_vdev_stats *vdev_stats =
  5259. qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  5260. if (!vdev_stats) {
  5261. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5262. "DP alloc failure - unable to allocate vdev stats");
  5263. return;
  5264. }
  5265. dp_aggregate_vdev_stats(vdev, vdev_stats);
  5266. stats->tx_packets = vdev_stats->tx_i.rcvd.num;
  5267. stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
  5268. stats->tx_errors = vdev_stats->tx.tx_failed +
  5269. vdev_stats->tx_i.dropped.dropped_pkt.num;
  5270. stats->tx_dropped = stats->tx_errors;
  5271. stats->rx_packets = vdev_stats->rx.unicast.num +
  5272. vdev_stats->rx.multicast.num +
  5273. vdev_stats->rx.bcast.num;
  5274. stats->rx_bytes = vdev_stats->rx.unicast.bytes +
  5275. vdev_stats->rx.multicast.bytes +
  5276. vdev_stats->rx.bcast.bytes;
  5277. }
  5278. /**
  5279. * dp_pdev_getstats() - get pdev packet level stats
  5280. * @pdev_handle: Datapath PDEV handle
  5281. * @stats: cdp network device stats structure
  5282. *
  5283. * Return: void
  5284. */
  5285. static void dp_pdev_getstats(void *pdev_handle,
  5286. struct cdp_dev_stats *stats)
  5287. {
  5288. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5289. dp_aggregate_pdev_stats(pdev);
  5290. stats->tx_packets = pdev->stats.tx_i.rcvd.num;
  5291. stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
  5292. stats->tx_errors = pdev->stats.tx.tx_failed +
  5293. pdev->stats.tx_i.dropped.dropped_pkt.num;
  5294. stats->tx_dropped = stats->tx_errors;
  5295. stats->rx_packets = pdev->stats.rx.unicast.num +
  5296. pdev->stats.rx.multicast.num +
  5297. pdev->stats.rx.bcast.num;
  5298. stats->rx_bytes = pdev->stats.rx.unicast.bytes +
  5299. pdev->stats.rx.multicast.bytes +
  5300. pdev->stats.rx.bcast.bytes;
  5301. }
  5302. /**
  5303. * dp_get_device_stats() - get interface level packet stats
  5304. * @handle: device handle
  5305. * @stats: cdp network device stats structure
  5306. * @type: device type pdev/vdev
  5307. *
  5308. * Return: void
  5309. */
  5310. static void dp_get_device_stats(void *handle,
  5311. struct cdp_dev_stats *stats, uint8_t type)
  5312. {
  5313. switch (type) {
  5314. case UPDATE_VDEV_STATS:
  5315. dp_vdev_getstats(handle, stats);
  5316. break;
  5317. case UPDATE_PDEV_STATS:
  5318. dp_pdev_getstats(handle, stats);
  5319. break;
  5320. default:
  5321. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  5322. "apstats cannot be updated for this input "
  5323. "type %d", type);
  5324. break;
  5325. }
  5326. }
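/*
 * Illustrative use of dp_get_device_stats() (sketch only, assuming the caller
 * already holds the opaque vdev/pdev handles): the same entry point serves
 * both device levels, selected by the type argument used elsewhere in this
 * file:
 *
 *	struct cdp_dev_stats dev_stats;
 *
 *	dp_get_device_stats(vdev_handle, &dev_stats, UPDATE_VDEV_STATS);
 *	dp_get_device_stats(pdev_handle, &dev_stats, UPDATE_PDEV_STATS);
 */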
  5327. /**
  5328. * dp_print_pdev_tx_stats(): Print Pdev level TX stats
  5329. * @pdev: DP_PDEV Handle
  5330. *
  5331. * Return:void
  5332. */
  5333. static inline void
  5334. dp_print_pdev_tx_stats(struct dp_pdev *pdev)
  5335. {
  5336. uint8_t index = 0;
  5337. DP_PRINT_STATS("PDEV Tx Stats:\n");
  5338. DP_PRINT_STATS("Received From Stack:");
  5339. DP_PRINT_STATS(" Packets = %d",
  5340. pdev->stats.tx_i.rcvd.num);
  5341. DP_PRINT_STATS(" Bytes = %llu",
  5342. pdev->stats.tx_i.rcvd.bytes);
  5343. DP_PRINT_STATS("Processed:");
  5344. DP_PRINT_STATS(" Packets = %d",
  5345. pdev->stats.tx_i.processed.num);
  5346. DP_PRINT_STATS(" Bytes = %llu",
  5347. pdev->stats.tx_i.processed.bytes);
  5348. DP_PRINT_STATS("Total Completions:");
  5349. DP_PRINT_STATS(" Packets = %u",
  5350. pdev->stats.tx.comp_pkt.num);
  5351. DP_PRINT_STATS(" Bytes = %llu",
  5352. pdev->stats.tx.comp_pkt.bytes);
  5353. DP_PRINT_STATS("Successful Completions:");
  5354. DP_PRINT_STATS(" Packets = %u",
  5355. pdev->stats.tx.tx_success.num);
  5356. DP_PRINT_STATS(" Bytes = %llu",
  5357. pdev->stats.tx.tx_success.bytes);
  5358. DP_PRINT_STATS("Dropped:");
  5359. DP_PRINT_STATS(" Total = %d",
  5360. pdev->stats.tx_i.dropped.dropped_pkt.num);
  5361. DP_PRINT_STATS(" Dma_map_error = %d",
  5362. pdev->stats.tx_i.dropped.dma_error);
  5363. DP_PRINT_STATS(" Ring Full = %d",
  5364. pdev->stats.tx_i.dropped.ring_full);
  5365. DP_PRINT_STATS(" Descriptor Not available = %d",
  5366. pdev->stats.tx_i.dropped.desc_na.num);
5367. DP_PRINT_STATS(" HW enqueue failed = %d",
  5368. pdev->stats.tx_i.dropped.enqueue_fail);
  5369. DP_PRINT_STATS(" Resources Full = %d",
  5370. pdev->stats.tx_i.dropped.res_full);
  5371. DP_PRINT_STATS(" FW removed Pkts = %u",
  5372. pdev->stats.tx.dropped.fw_rem.num);
5373. DP_PRINT_STATS(" FW removed bytes = %llu",
  5374. pdev->stats.tx.dropped.fw_rem.bytes);
  5375. DP_PRINT_STATS(" FW removed transmitted = %d",
  5376. pdev->stats.tx.dropped.fw_rem_tx);
  5377. DP_PRINT_STATS(" FW removed untransmitted = %d",
  5378. pdev->stats.tx.dropped.fw_rem_notx);
  5379. DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d",
  5380. pdev->stats.tx.dropped.fw_reason1);
  5381. DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d",
  5382. pdev->stats.tx.dropped.fw_reason2);
  5383. DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d",
  5384. pdev->stats.tx.dropped.fw_reason3);
  5385. DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
  5386. pdev->stats.tx.dropped.age_out);
  5387. DP_PRINT_STATS(" headroom insufficient = %d",
  5388. pdev->stats.tx_i.dropped.headroom_insufficient);
  5389. DP_PRINT_STATS(" Multicast:");
  5390. DP_PRINT_STATS(" Packets: %u",
  5391. pdev->stats.tx.mcast.num);
  5392. DP_PRINT_STATS(" Bytes: %llu",
  5393. pdev->stats.tx.mcast.bytes);
  5394. DP_PRINT_STATS("Scatter Gather:");
  5395. DP_PRINT_STATS(" Packets = %d",
  5396. pdev->stats.tx_i.sg.sg_pkt.num);
  5397. DP_PRINT_STATS(" Bytes = %llu",
  5398. pdev->stats.tx_i.sg.sg_pkt.bytes);
  5399. DP_PRINT_STATS(" Dropped By Host = %d",
  5400. pdev->stats.tx_i.sg.dropped_host.num);
  5401. DP_PRINT_STATS(" Dropped By Target = %d",
  5402. pdev->stats.tx_i.sg.dropped_target);
  5403. DP_PRINT_STATS("TSO:");
  5404. DP_PRINT_STATS(" Number of Segments = %d",
  5405. pdev->stats.tx_i.tso.num_seg);
  5406. DP_PRINT_STATS(" Packets = %d",
  5407. pdev->stats.tx_i.tso.tso_pkt.num);
  5408. DP_PRINT_STATS(" Bytes = %llu",
  5409. pdev->stats.tx_i.tso.tso_pkt.bytes);
  5410. DP_PRINT_STATS(" Dropped By Host = %d",
  5411. pdev->stats.tx_i.tso.dropped_host.num);
  5412. DP_PRINT_STATS("Mcast Enhancement:");
  5413. DP_PRINT_STATS(" Packets = %d",
  5414. pdev->stats.tx_i.mcast_en.mcast_pkt.num);
  5415. DP_PRINT_STATS(" Bytes = %llu",
  5416. pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
  5417. DP_PRINT_STATS(" Dropped: Map Errors = %d",
  5418. pdev->stats.tx_i.mcast_en.dropped_map_error);
  5419. DP_PRINT_STATS(" Dropped: Self Mac = %d",
  5420. pdev->stats.tx_i.mcast_en.dropped_self_mac);
  5421. DP_PRINT_STATS(" Dropped: Send Fail = %d",
  5422. pdev->stats.tx_i.mcast_en.dropped_send_fail);
  5423. DP_PRINT_STATS(" Unicast sent = %d",
  5424. pdev->stats.tx_i.mcast_en.ucast);
  5425. DP_PRINT_STATS("Raw:");
  5426. DP_PRINT_STATS(" Packets = %d",
  5427. pdev->stats.tx_i.raw.raw_pkt.num);
  5428. DP_PRINT_STATS(" Bytes = %llu",
  5429. pdev->stats.tx_i.raw.raw_pkt.bytes);
  5430. DP_PRINT_STATS(" DMA map error = %d",
  5431. pdev->stats.tx_i.raw.dma_map_error);
  5432. DP_PRINT_STATS("Reinjected:");
  5433. DP_PRINT_STATS(" Packets = %d",
  5434. pdev->stats.tx_i.reinject_pkts.num);
  5435. DP_PRINT_STATS(" Bytes = %llu\n",
  5436. pdev->stats.tx_i.reinject_pkts.bytes);
  5437. DP_PRINT_STATS("Inspected:");
  5438. DP_PRINT_STATS(" Packets = %d",
  5439. pdev->stats.tx_i.inspect_pkts.num);
  5440. DP_PRINT_STATS(" Bytes = %llu",
  5441. pdev->stats.tx_i.inspect_pkts.bytes);
  5442. DP_PRINT_STATS("Nawds Multicast:");
  5443. DP_PRINT_STATS(" Packets = %d",
  5444. pdev->stats.tx_i.nawds_mcast.num);
  5445. DP_PRINT_STATS(" Bytes = %llu",
  5446. pdev->stats.tx_i.nawds_mcast.bytes);
  5447. DP_PRINT_STATS("CCE Classified:");
  5448. DP_PRINT_STATS(" CCE Classified Packets: %u",
  5449. pdev->stats.tx_i.cce_classified);
  5450. DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
  5451. pdev->stats.tx_i.cce_classified_raw);
  5452. DP_PRINT_STATS("Mesh stats:");
  5453. DP_PRINT_STATS(" frames to firmware: %u",
  5454. pdev->stats.tx_i.mesh.exception_fw);
  5455. DP_PRINT_STATS(" completions from fw: %u",
  5456. pdev->stats.tx_i.mesh.completion_fw);
  5457. DP_PRINT_STATS("PPDU stats counter");
  5458. for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
  5459. DP_PRINT_STATS(" Tag[%d] = %llu", index,
  5460. pdev->stats.ppdu_stats_counter[index]);
  5461. }
  5462. }
  5463. /**
  5464. * dp_print_pdev_rx_stats(): Print Pdev level RX stats
  5465. * @pdev: DP_PDEV Handle
  5466. *
  5467. * Return: void
  5468. */
  5469. static inline void
  5470. dp_print_pdev_rx_stats(struct dp_pdev *pdev)
  5471. {
  5472. DP_PRINT_STATS("PDEV Rx Stats:\n");
  5473. DP_PRINT_STATS("Received From HW (Per Rx Ring):");
  5474. DP_PRINT_STATS(" Packets = %d %d %d %d",
  5475. pdev->stats.rx.rcvd_reo[0].num,
  5476. pdev->stats.rx.rcvd_reo[1].num,
  5477. pdev->stats.rx.rcvd_reo[2].num,
  5478. pdev->stats.rx.rcvd_reo[3].num);
  5479. DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
  5480. pdev->stats.rx.rcvd_reo[0].bytes,
  5481. pdev->stats.rx.rcvd_reo[1].bytes,
  5482. pdev->stats.rx.rcvd_reo[2].bytes,
  5483. pdev->stats.rx.rcvd_reo[3].bytes);
  5484. DP_PRINT_STATS("Replenished:");
  5485. DP_PRINT_STATS(" Packets = %d",
  5486. pdev->stats.replenish.pkts.num);
  5487. DP_PRINT_STATS(" Bytes = %llu",
  5488. pdev->stats.replenish.pkts.bytes);
  5489. DP_PRINT_STATS(" Buffers Added To Freelist = %d",
  5490. pdev->stats.buf_freelist);
  5491. DP_PRINT_STATS(" Low threshold intr = %d",
  5492. pdev->stats.replenish.low_thresh_intrs);
  5493. DP_PRINT_STATS("Dropped:");
  5494. DP_PRINT_STATS(" msdu_not_done = %d",
  5495. pdev->stats.dropped.msdu_not_done);
  5496. DP_PRINT_STATS(" mon_rx_drop = %d",
  5497. pdev->stats.dropped.mon_rx_drop);
  5498. DP_PRINT_STATS(" mec_drop = %d",
  5499. pdev->stats.rx.mec_drop.num);
  5500. DP_PRINT_STATS(" Bytes = %llu",
  5501. pdev->stats.rx.mec_drop.bytes);
  5502. DP_PRINT_STATS("Sent To Stack:");
  5503. DP_PRINT_STATS(" Packets = %d",
  5504. pdev->stats.rx.to_stack.num);
  5505. DP_PRINT_STATS(" Bytes = %llu",
  5506. pdev->stats.rx.to_stack.bytes);
  5507. DP_PRINT_STATS("Multicast/Broadcast:");
  5508. DP_PRINT_STATS(" Packets = %d",
  5509. pdev->stats.rx.multicast.num);
  5510. DP_PRINT_STATS(" Bytes = %llu",
  5511. pdev->stats.rx.multicast.bytes);
  5512. DP_PRINT_STATS("Errors:");
5513. DP_PRINT_STATS(" Rxdma Ring Un-initialized = %d",
  5514. pdev->stats.replenish.rxdma_err);
  5515. DP_PRINT_STATS(" Desc Alloc Failed: = %d",
  5516. pdev->stats.err.desc_alloc_fail);
  5517. DP_PRINT_STATS(" IP checksum error = %d",
  5518. pdev->stats.err.ip_csum_err);
  5519. DP_PRINT_STATS(" TCP/UDP checksum error = %d",
  5520. pdev->stats.err.tcp_udp_csum_err);
  5521. /* Get bar_recv_cnt */
  5522. dp_aggregate_pdev_ctrl_frames_stats(pdev);
  5523. DP_PRINT_STATS("BAR Received Count: = %d",
  5524. pdev->stats.rx.bar_recv_cnt);
  5525. }
  5526. /**
  5527. * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
  5528. * @pdev: DP_PDEV Handle
  5529. *
  5530. * Return: void
  5531. */
  5532. static inline void
  5533. dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
  5534. {
  5535. struct cdp_pdev_mon_stats *rx_mon_stats;
  5536. rx_mon_stats = &pdev->rx_mon_stats;
  5537. DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
  5538. dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
  5539. DP_PRINT_STATS("status_ppdu_done_cnt = %d",
  5540. rx_mon_stats->status_ppdu_done);
  5541. DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
  5542. rx_mon_stats->dest_ppdu_done);
  5543. DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
  5544. rx_mon_stats->dest_mpdu_done);
  5545. DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
  5546. rx_mon_stats->dest_mpdu_drop);
  5547. DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
  5548. rx_mon_stats->dup_mon_linkdesc_cnt);
  5549. DP_PRINT_STATS("dup_mon_buf_cnt = %d",
  5550. rx_mon_stats->dup_mon_buf_cnt);
  5551. }
  5552. /**
5553. * dp_print_soc_tx_stats(): Print SOC level Tx stats
5554. * @soc: DP_SOC Handle
  5555. *
  5556. * Return: void
  5557. */
  5558. static inline void
  5559. dp_print_soc_tx_stats(struct dp_soc *soc)
  5560. {
  5561. uint8_t desc_pool_id;
  5562. soc->stats.tx.desc_in_use = 0;
  5563. DP_PRINT_STATS("SOC Tx Stats:\n");
  5564. for (desc_pool_id = 0;
  5565. desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5566. desc_pool_id++)
  5567. soc->stats.tx.desc_in_use +=
  5568. soc->tx_desc[desc_pool_id].num_allocated;
  5569. DP_PRINT_STATS("Tx Descriptors In Use = %d",
  5570. soc->stats.tx.desc_in_use);
  5571. DP_PRINT_STATS("Invalid peer:");
  5572. DP_PRINT_STATS(" Packets = %d",
  5573. soc->stats.tx.tx_invalid_peer.num);
  5574. DP_PRINT_STATS(" Bytes = %llu",
  5575. soc->stats.tx.tx_invalid_peer.bytes);
  5576. DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
  5577. soc->stats.tx.tcl_ring_full[0],
  5578. soc->stats.tx.tcl_ring_full[1],
  5579. soc->stats.tx.tcl_ring_full[2]);
  5580. }
  5581. /**
  5582. * dp_print_soc_rx_stats: Print SOC level Rx stats
  5583. * @soc: DP_SOC Handle
  5584. *
  5585. * Return:void
  5586. */
  5587. static inline void
  5588. dp_print_soc_rx_stats(struct dp_soc *soc)
  5589. {
  5590. uint32_t i;
  5591. char reo_error[DP_REO_ERR_LENGTH];
  5592. char rxdma_error[DP_RXDMA_ERR_LENGTH];
  5593. uint8_t index = 0;
  5594. DP_PRINT_STATS("SOC Rx Stats:\n");
  5595. DP_PRINT_STATS("Fragmented packets: %u",
  5596. soc->stats.rx.rx_frags);
  5597. DP_PRINT_STATS("Reo reinjected packets: %u",
  5598. soc->stats.rx.reo_reinject);
  5599. DP_PRINT_STATS("Errors:\n");
  5600. DP_PRINT_STATS("Rx Decrypt Errors = %d",
  5601. (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
  5602. soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
  5603. DP_PRINT_STATS("Invalid RBM = %d",
  5604. soc->stats.rx.err.invalid_rbm);
  5605. DP_PRINT_STATS("Invalid Vdev = %d",
  5606. soc->stats.rx.err.invalid_vdev);
  5607. DP_PRINT_STATS("Invalid Pdev = %d",
  5608. soc->stats.rx.err.invalid_pdev);
  5609. DP_PRINT_STATS("Invalid Peer = %d",
  5610. soc->stats.rx.err.rx_invalid_peer.num);
  5611. DP_PRINT_STATS("HAL Ring Access Fail = %d",
  5612. soc->stats.rx.err.hal_ring_access_fail);
  5613. DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
  5614. DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
  5615. DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
  5616. DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
  5617. for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
  5618. index += qdf_snprint(&rxdma_error[index],
  5619. DP_RXDMA_ERR_LENGTH - index,
  5620. " %d", soc->stats.rx.err.rxdma_error[i]);
  5621. }
  5622. DP_PRINT_STATS("RXDMA Error (0-31):%s",
  5623. rxdma_error);
  5624. index = 0;
  5625. for (i = 0; i < HAL_REO_ERR_MAX; i++) {
  5626. index += qdf_snprint(&reo_error[index],
  5627. DP_REO_ERR_LENGTH - index,
  5628. " %d", soc->stats.rx.err.reo_error[i]);
  5629. }
  5630. DP_PRINT_STATS("REO Error(0-14):%s",
  5631. reo_error);
  5632. }
  5633. /**
5634. * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
  5635. * @ring_type: Ring
  5636. *
  5637. * Return: char const pointer
  5638. */
  5639. static inline const
  5640. char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
  5641. {
  5642. switch (ring_type) {
  5643. case REO_DST:
  5644. return "Reo_dst";
  5645. case REO_EXCEPTION:
  5646. return "Reo_exception";
  5647. case REO_CMD:
  5648. return "Reo_cmd";
  5649. case REO_REINJECT:
  5650. return "Reo_reinject";
  5651. case REO_STATUS:
  5652. return "Reo_status";
  5653. case WBM2SW_RELEASE:
  5654. return "wbm2sw_release";
  5655. case TCL_DATA:
  5656. return "tcl_data";
  5657. case TCL_CMD:
  5658. return "tcl_cmd";
  5659. case TCL_STATUS:
  5660. return "tcl_status";
  5661. case SW2WBM_RELEASE:
  5662. return "sw2wbm_release";
  5663. case RXDMA_BUF:
  5664. return "Rxdma_buf";
  5665. case RXDMA_DST:
  5666. return "Rxdma_dst";
  5667. case RXDMA_MONITOR_BUF:
  5668. return "Rxdma_monitor_buf";
  5669. case RXDMA_MONITOR_DESC:
  5670. return "Rxdma_monitor_desc";
  5671. case RXDMA_MONITOR_STATUS:
  5672. return "Rxdma_monitor_status";
  5673. default:
  5674. dp_err("Invalid ring type");
  5675. break;
  5676. }
  5677. return "Invalid";
  5678. }
  5679. /**
  5680. * dp_print_ring_stat_from_hal(): Print hal level ring stats
  5681. * @soc: DP_SOC handle
  5682. * @srng: DP_SRNG handle
5683. * @ring_type: srng src/dst ring type; the printable ring name is
5684. * derived from it via dp_srng_get_str_from_hal_ring_type()
  5685. *
  5686. * Return: void
  5687. */
  5688. static void
  5689. dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
  5690. enum hal_ring_type ring_type)
  5691. {
  5692. uint32_t tailp;
  5693. uint32_t headp;
  5694. int32_t hw_headp = -1;
  5695. int32_t hw_tailp = -1;
  5696. const char *ring_name;
  5697. struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
  5698. if (soc && srng && srng->hal_srng) {
  5699. ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
  5700. hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
  5701. DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
  5702. ring_name, headp, tailp);
  5703. hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
  5704. &hw_tailp, ring_type);
  5705. DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
  5706. ring_name, hw_headp, hw_tailp);
  5707. }
  5708. }
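/*
 * The SW head/tail printed above come from hal_get_sw_hptp() (the host-side
 * shadow pointers), while the HW head/tail come from hal_get_hw_hptp();
 * comparing the two pairs for a ring can help spot a stalled ring where the
 * SW and HW views have drifted apart.
 */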
  5709. /**
5710. * dp_print_mon_ring_stat_from_hal() - Print stats for monitor rings based
  5711. * on target
  5712. * @pdev: physical device handle
  5713. * @mac_id: mac id
  5714. *
  5715. * Return: void
  5716. */
  5717. static inline
  5718. void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
  5719. {
  5720. if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
  5721. dp_print_ring_stat_from_hal(pdev->soc,
  5722. &pdev->rxdma_mon_buf_ring[mac_id],
  5723. RXDMA_MONITOR_BUF);
  5724. dp_print_ring_stat_from_hal(pdev->soc,
  5725. &pdev->rxdma_mon_dst_ring[mac_id],
  5726. RXDMA_MONITOR_DST);
  5727. dp_print_ring_stat_from_hal(pdev->soc,
  5728. &pdev->rxdma_mon_desc_ring[mac_id],
  5729. RXDMA_MONITOR_DESC);
  5730. }
  5731. dp_print_ring_stat_from_hal(pdev->soc,
  5732. &pdev->rxdma_mon_status_ring[mac_id],
  5733. RXDMA_MONITOR_STATUS);
  5734. }
  5735. /**
  5736. * dp_print_ring_stats(): Print tail and head pointer
  5737. * @pdev: DP_PDEV handle
  5738. *
  5739. * Return:void
  5740. */
  5741. static inline void
  5742. dp_print_ring_stats(struct dp_pdev *pdev)
  5743. {
  5744. uint32_t i;
  5745. int mac_id;
  5746. dp_print_ring_stat_from_hal(pdev->soc,
  5747. &pdev->soc->reo_exception_ring,
  5748. REO_EXCEPTION);
  5749. dp_print_ring_stat_from_hal(pdev->soc,
  5750. &pdev->soc->reo_reinject_ring,
  5751. REO_REINJECT);
  5752. dp_print_ring_stat_from_hal(pdev->soc,
  5753. &pdev->soc->reo_cmd_ring,
  5754. REO_CMD);
  5755. dp_print_ring_stat_from_hal(pdev->soc,
  5756. &pdev->soc->reo_status_ring,
  5757. REO_STATUS);
  5758. dp_print_ring_stat_from_hal(pdev->soc,
  5759. &pdev->soc->rx_rel_ring,
  5760. WBM2SW_RELEASE);
  5761. dp_print_ring_stat_from_hal(pdev->soc,
  5762. &pdev->soc->tcl_cmd_ring,
  5763. TCL_CMD);
  5764. dp_print_ring_stat_from_hal(pdev->soc,
  5765. &pdev->soc->tcl_status_ring,
  5766. TCL_STATUS);
  5767. dp_print_ring_stat_from_hal(pdev->soc,
  5768. &pdev->soc->wbm_desc_rel_ring,
  5769. SW2WBM_RELEASE);
  5770. for (i = 0; i < MAX_REO_DEST_RINGS; i++)
  5771. dp_print_ring_stat_from_hal(pdev->soc,
  5772. &pdev->soc->reo_dest_ring[i],
  5773. REO_DST);
  5774. for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
  5775. dp_print_ring_stat_from_hal(pdev->soc,
  5776. &pdev->soc->tcl_data_ring[i],
  5777. TCL_DATA);
  5778. for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
  5779. dp_print_ring_stat_from_hal(pdev->soc,
  5780. &pdev->soc->tx_comp_ring[i],
  5781. WBM2SW_RELEASE);
  5782. dp_print_ring_stat_from_hal(pdev->soc,
  5783. &pdev->rx_refill_buf_ring,
  5784. RXDMA_BUF);
  5785. dp_print_ring_stat_from_hal(pdev->soc,
  5786. &pdev->rx_refill_buf_ring2,
  5787. RXDMA_BUF);
  5788. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  5789. dp_print_ring_stat_from_hal(pdev->soc,
  5790. &pdev->rx_mac_buf_ring[i],
  5791. RXDMA_BUF);
  5792. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
  5793. dp_print_mon_ring_stat_from_hal(pdev, mac_id);
  5794. for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
  5795. dp_print_ring_stat_from_hal(pdev->soc,
  5796. &pdev->rxdma_err_dst_ring[i],
  5797. RXDMA_DST);
  5798. }
  5799. /**
  5800. * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
  5801. * @vdev: DP_VDEV handle
  5802. *
  5803. * Return:void
  5804. */
  5805. static inline void
  5806. dp_txrx_host_stats_clr(struct dp_vdev *vdev)
  5807. {
  5808. struct dp_peer *peer = NULL;
  5809. DP_STATS_CLR(vdev->pdev);
  5810. DP_STATS_CLR(vdev->pdev->soc);
  5811. DP_STATS_CLR(vdev);
  5812. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5813. if (!peer)
  5814. return;
  5815. DP_STATS_CLR(peer);
  5816. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5817. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5818. &peer->stats, peer->peer_ids[0],
  5819. UPDATE_PEER_STATS, vdev->pdev->pdev_id);
  5820. #endif
  5821. }
  5822. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5823. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5824. &vdev->stats, vdev->vdev_id,
  5825. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  5826. #endif
  5827. }
  5828. /**
  5829. * dp_print_common_rates_info(): Print common rate for tx or rx
  5830. * @pkt_type_array: rate type array contains rate info
  5831. *
  5832. * Return:void
  5833. */
  5834. static inline void
  5835. dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
  5836. {
  5837. uint8_t mcs, pkt_type;
  5838. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  5839. for (mcs = 0; mcs < MAX_MCS; mcs++) {
  5840. if (!dp_rate_string[pkt_type][mcs].valid)
  5841. continue;
  5842. DP_PRINT_STATS(" %s = %d",
  5843. dp_rate_string[pkt_type][mcs].mcs_type,
  5844. pkt_type_array[pkt_type].mcs_count[mcs]);
  5845. }
  5846. DP_PRINT_STATS("\n");
  5847. }
  5848. }
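/*
 * dp_print_common_rates_info() expects pkt_type_array to point at DOT11_MAX
 * entries of struct cdp_pkt_type, each carrying an mcs_count[MAX_MCS] array;
 * rows whose dp_rate_string[pkt_type][mcs].valid flag is clear are skipped.
 * Callers in this file pass pdev->stats.tx.pkt_type, pdev->stats.rx.pkt_type
 * or the per-peer equivalents.
 */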
  5849. /**
  5850. * dp_print_rx_rates(): Print Rx rate stats
  5851. * @vdev: DP_VDEV handle
  5852. *
  5853. * Return:void
  5854. */
  5855. static inline void
  5856. dp_print_rx_rates(struct dp_vdev *vdev)
  5857. {
  5858. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  5859. uint8_t i;
  5860. uint8_t index = 0;
  5861. char nss[DP_NSS_LENGTH];
  5862. DP_PRINT_STATS("Rx Rate Info:\n");
  5863. dp_print_common_rates_info(pdev->stats.rx.pkt_type);
  5864. index = 0;
  5865. for (i = 0; i < SS_COUNT; i++) {
  5866. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  5867. " %d", pdev->stats.rx.nss[i]);
  5868. }
  5869. DP_PRINT_STATS("NSS(1-8) = %s",
  5870. nss);
  5871. DP_PRINT_STATS("SGI ="
  5872. " 0.8us %d,"
  5873. " 0.4us %d,"
  5874. " 1.6us %d,"
  5875. " 3.2us %d,",
  5876. pdev->stats.rx.sgi_count[0],
  5877. pdev->stats.rx.sgi_count[1],
  5878. pdev->stats.rx.sgi_count[2],
  5879. pdev->stats.rx.sgi_count[3]);
  5880. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  5881. pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
  5882. pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
  5883. DP_PRINT_STATS("Reception Type ="
  5884. " SU: %d,"
  5885. " MU_MIMO:%d,"
  5886. " MU_OFDMA:%d,"
  5887. " MU_OFDMA_MIMO:%d\n",
  5888. pdev->stats.rx.reception_type[0],
  5889. pdev->stats.rx.reception_type[1],
  5890. pdev->stats.rx.reception_type[2],
  5891. pdev->stats.rx.reception_type[3]);
  5892. DP_PRINT_STATS("Aggregation:\n");
  5893. DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
  5894. pdev->stats.rx.ampdu_cnt);
  5895. DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
  5896. pdev->stats.rx.non_ampdu_cnt);
  5897. DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
  5898. pdev->stats.rx.amsdu_cnt);
  5899. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
  5900. pdev->stats.rx.non_amsdu_cnt);
  5901. }
  5902. /**
  5903. * dp_print_tx_rates(): Print tx rates
  5904. * @vdev: DP_VDEV handle
  5905. *
  5906. * Return:void
  5907. */
  5908. static inline void
  5909. dp_print_tx_rates(struct dp_vdev *vdev)
  5910. {
  5911. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  5912. uint8_t index;
  5913. char nss[DP_NSS_LENGTH];
  5914. int nss_index;
  5915. DP_PRINT_STATS("Tx Rate Info:\n");
  5916. dp_print_common_rates_info(pdev->stats.tx.pkt_type);
  5917. DP_PRINT_STATS("SGI ="
  5918. " 0.8us %d"
  5919. " 0.4us %d"
  5920. " 1.6us %d"
  5921. " 3.2us %d",
  5922. pdev->stats.tx.sgi_count[0],
  5923. pdev->stats.tx.sgi_count[1],
  5924. pdev->stats.tx.sgi_count[2],
  5925. pdev->stats.tx.sgi_count[3]);
  5926. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  5927. pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
  5928. pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
  5929. index = 0;
  5930. for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
  5931. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  5932. " %d", pdev->stats.tx.nss[nss_index]);
  5933. }
  5934. DP_PRINT_STATS("NSS(1-8) = %s", nss);
  5935. DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
  5936. DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
  5937. DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
  5938. DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
  5939. DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
  5940. DP_PRINT_STATS("Aggregation:\n");
  5941. DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
  5942. pdev->stats.tx.amsdu_cnt);
  5943. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
  5944. pdev->stats.tx.non_amsdu_cnt);
  5945. }
  5946. /**
  5947. * dp_print_peer_stats():print peer stats
  5948. * @peer: DP_PEER handle
  5949. *
  5950. * return void
  5951. */
  5952. static inline void dp_print_peer_stats(struct dp_peer *peer)
  5953. {
  5954. uint8_t i;
  5955. uint32_t index;
  5956. char nss[DP_NSS_LENGTH];
  5957. DP_PRINT_STATS("Node Tx Stats:\n");
  5958. DP_PRINT_STATS("Total Packet Completions = %d",
  5959. peer->stats.tx.comp_pkt.num);
  5960. DP_PRINT_STATS("Total Bytes Completions = %llu",
  5961. peer->stats.tx.comp_pkt.bytes);
  5962. DP_PRINT_STATS("Success Packets = %d",
  5963. peer->stats.tx.tx_success.num);
  5964. DP_PRINT_STATS("Success Bytes = %llu",
  5965. peer->stats.tx.tx_success.bytes);
  5966. DP_PRINT_STATS("Unicast Success Packets = %d",
  5967. peer->stats.tx.ucast.num);
  5968. DP_PRINT_STATS("Unicast Success Bytes = %llu",
  5969. peer->stats.tx.ucast.bytes);
  5970. DP_PRINT_STATS("Multicast Success Packets = %d",
  5971. peer->stats.tx.mcast.num);
  5972. DP_PRINT_STATS("Multicast Success Bytes = %llu",
  5973. peer->stats.tx.mcast.bytes);
  5974. DP_PRINT_STATS("Broadcast Success Packets = %d",
  5975. peer->stats.tx.bcast.num);
  5976. DP_PRINT_STATS("Broadcast Success Bytes = %llu",
  5977. peer->stats.tx.bcast.bytes);
  5978. DP_PRINT_STATS("Packets Failed = %d",
  5979. peer->stats.tx.tx_failed);
  5980. DP_PRINT_STATS("Packets In OFDMA = %d",
  5981. peer->stats.tx.ofdma);
  5982. DP_PRINT_STATS("Packets In STBC = %d",
  5983. peer->stats.tx.stbc);
  5984. DP_PRINT_STATS("Packets In LDPC = %d",
  5985. peer->stats.tx.ldpc);
  5986. DP_PRINT_STATS("Packet Retries = %d",
  5987. peer->stats.tx.retries);
  5988. DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
  5989. peer->stats.tx.amsdu_cnt);
  5990. DP_PRINT_STATS("Last Packet RSSI = %d",
  5991. peer->stats.tx.last_ack_rssi);
  5992. DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
  5993. peer->stats.tx.dropped.fw_rem.num);
  5994. DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
  5995. peer->stats.tx.dropped.fw_rem.bytes);
  5996. DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
  5997. peer->stats.tx.dropped.fw_rem_tx);
  5998. DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
  5999. peer->stats.tx.dropped.fw_rem_notx);
  6000. DP_PRINT_STATS("Dropped : Age Out = %d",
  6001. peer->stats.tx.dropped.age_out);
  6002. DP_PRINT_STATS("NAWDS : ");
  6003. DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
  6004. peer->stats.tx.nawds_mcast_drop);
  6005. DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
  6006. peer->stats.tx.nawds_mcast.num);
  6007. DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
  6008. peer->stats.tx.nawds_mcast.bytes);
  6009. DP_PRINT_STATS("Rate Info:");
  6010. dp_print_common_rates_info(peer->stats.tx.pkt_type);
  6011. DP_PRINT_STATS("SGI = "
  6012. " 0.8us %d"
  6013. " 0.4us %d"
  6014. " 1.6us %d"
  6015. " 3.2us %d",
  6016. peer->stats.tx.sgi_count[0],
  6017. peer->stats.tx.sgi_count[1],
  6018. peer->stats.tx.sgi_count[2],
  6019. peer->stats.tx.sgi_count[3]);
  6020. DP_PRINT_STATS("Excess Retries per AC ");
  6021. DP_PRINT_STATS(" Best effort = %d",
  6022. peer->stats.tx.excess_retries_per_ac[0]);
  6023. DP_PRINT_STATS(" Background= %d",
  6024. peer->stats.tx.excess_retries_per_ac[1]);
  6025. DP_PRINT_STATS(" Video = %d",
  6026. peer->stats.tx.excess_retries_per_ac[2]);
  6027. DP_PRINT_STATS(" Voice = %d",
  6028. peer->stats.tx.excess_retries_per_ac[3]);
  6029. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
  6030. peer->stats.tx.bw[0], peer->stats.tx.bw[1],
  6031. peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
  6032. index = 0;
  6033. for (i = 0; i < SS_COUNT; i++) {
  6034. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6035. " %d", peer->stats.tx.nss[i]);
  6036. }
  6037. DP_PRINT_STATS("NSS(1-8) = %s",
  6038. nss);
  6039. DP_PRINT_STATS("Aggregation:");
  6040. DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
  6041. peer->stats.tx.amsdu_cnt);
  6042. DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
  6043. peer->stats.tx.non_amsdu_cnt);
  6044. DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
  6045. DP_PRINT_STATS(" Bytes transmitted in last sec: %d",
  6046. peer->stats.tx.tx_byte_rate);
  6047. DP_PRINT_STATS(" Data transmitted in last sec: %d",
  6048. peer->stats.tx.tx_data_rate);
  6049. DP_PRINT_STATS("Node Rx Stats:");
  6050. DP_PRINT_STATS("Packets Sent To Stack = %d",
  6051. peer->stats.rx.to_stack.num);
  6052. DP_PRINT_STATS("Bytes Sent To Stack = %llu",
  6053. peer->stats.rx.to_stack.bytes);
  6054. for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
  6055. DP_PRINT_STATS("Ring Id = %d", i);
  6056. DP_PRINT_STATS(" Packets Received = %d",
  6057. peer->stats.rx.rcvd_reo[i].num);
  6058. DP_PRINT_STATS(" Bytes Received = %llu",
  6059. peer->stats.rx.rcvd_reo[i].bytes);
  6060. }
  6061. DP_PRINT_STATS("Multicast Packets Received = %d",
  6062. peer->stats.rx.multicast.num);
  6063. DP_PRINT_STATS("Multicast Bytes Received = %llu",
  6064. peer->stats.rx.multicast.bytes);
  6065. DP_PRINT_STATS("Broadcast Packets Received = %d",
  6066. peer->stats.rx.bcast.num);
  6067. DP_PRINT_STATS("Broadcast Bytes Received = %llu",
  6068. peer->stats.rx.bcast.bytes);
  6069. DP_PRINT_STATS("Intra BSS Packets Received = %d",
  6070. peer->stats.rx.intra_bss.pkts.num);
  6071. DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
  6072. peer->stats.rx.intra_bss.pkts.bytes);
  6073. DP_PRINT_STATS("Raw Packets Received = %d",
  6074. peer->stats.rx.raw.num);
  6075. DP_PRINT_STATS("Raw Bytes Received = %llu",
  6076. peer->stats.rx.raw.bytes);
  6077. DP_PRINT_STATS("Errors: MIC Errors = %d",
  6078. peer->stats.rx.err.mic_err);
6079. DP_PRINT_STATS("Errors: Decryption Errors = %d",
  6080. peer->stats.rx.err.decrypt_err);
6081. DP_PRINT_STATS("Msdu's Received Not Part of Ampdu = %d",
6082. peer->stats.rx.non_ampdu_cnt);
6083. DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6084. peer->stats.rx.ampdu_cnt);
  6085. DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
  6086. peer->stats.rx.non_amsdu_cnt);
  6087. DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
  6088. peer->stats.rx.amsdu_cnt);
  6089. DP_PRINT_STATS("NAWDS : ");
  6090. DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
  6091. peer->stats.rx.nawds_mcast_drop);
  6092. DP_PRINT_STATS("SGI ="
  6093. " 0.8us %d"
  6094. " 0.4us %d"
  6095. " 1.6us %d"
  6096. " 3.2us %d",
  6097. peer->stats.rx.sgi_count[0],
  6098. peer->stats.rx.sgi_count[1],
  6099. peer->stats.rx.sgi_count[2],
  6100. peer->stats.rx.sgi_count[3]);
  6101. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
  6102. peer->stats.rx.bw[0], peer->stats.rx.bw[1],
  6103. peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
  6104. DP_PRINT_STATS("Reception Type ="
  6105. " SU %d,"
  6106. " MU_MIMO %d,"
  6107. " MU_OFDMA %d,"
  6108. " MU_OFDMA_MIMO %d",
  6109. peer->stats.rx.reception_type[0],
  6110. peer->stats.rx.reception_type[1],
  6111. peer->stats.rx.reception_type[2],
  6112. peer->stats.rx.reception_type[3]);
  6113. dp_print_common_rates_info(peer->stats.rx.pkt_type);
  6114. index = 0;
  6115. for (i = 0; i < SS_COUNT; i++) {
  6116. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6117. " %d", peer->stats.rx.nss[i]);
  6118. }
  6119. DP_PRINT_STATS("NSS(1-8) = %s",
  6120. nss);
  6121. DP_PRINT_STATS("Aggregation:");
  6122. DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
  6123. peer->stats.rx.ampdu_cnt);
  6124. DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
  6125. peer->stats.rx.non_ampdu_cnt);
  6126. DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
  6127. peer->stats.rx.amsdu_cnt);
  6128. DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
  6129. peer->stats.rx.non_amsdu_cnt);
  6130. DP_PRINT_STATS("Bytes and Packets received in last one sec:");
  6131. DP_PRINT_STATS(" Bytes received in last sec: %d",
  6132. peer->stats.rx.rx_byte_rate);
  6133. DP_PRINT_STATS(" Data received in last sec: %d",
  6134. peer->stats.rx.rx_data_rate);
  6135. }
  6136. /*
  6137. * dp_get_host_peer_stats()- function to print peer stats
  6138. * @pdev_handle: DP_PDEV handle
  6139. * @mac_addr: mac address of the peer
  6140. *
  6141. * Return: void
  6142. */
  6143. static void
  6144. dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
  6145. {
  6146. struct dp_peer *peer;
  6147. uint8_t local_id;
  6148. if (!mac_addr) {
  6149. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6150. "Invalid MAC address\n");
  6151. return;
  6152. }
  6153. peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
  6154. &local_id);
  6155. if (!peer) {
  6156. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6157. "%s: Invalid peer\n", __func__);
  6158. return;
  6159. }
  6160. dp_print_peer_stats(peer);
  6161. dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
  6162. }
  6163. /**
  6164. * dp_print_soc_cfg_params()- Dump soc wlan config parameters
6165. * @soc: DP_SOC handle
  6166. *
  6167. * Return: void
  6168. */
  6169. static void
  6170. dp_print_soc_cfg_params(struct dp_soc *soc)
  6171. {
  6172. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  6173. uint8_t index = 0, i = 0;
  6174. char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
  6175. int num_of_int_contexts;
  6176. if (!soc) {
  6177. dp_err("Context is null");
  6178. return;
  6179. }
  6180. soc_cfg_ctx = soc->wlan_cfg_ctx;
  6181. if (!soc_cfg_ctx) {
  6182. dp_err("Context is null");
  6183. return;
  6184. }
  6185. num_of_int_contexts =
  6186. wlan_cfg_get_num_contexts(soc_cfg_ctx);
  6187. DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
  6188. soc_cfg_ctx->num_int_ctxts);
  6189. DP_TRACE_STATS(DEBUG, "Max clients: %u",
  6190. soc_cfg_ctx->max_clients);
  6191. DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
  6192. soc_cfg_ctx->max_alloc_size);
  6193. DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
  6194. soc_cfg_ctx->per_pdev_tx_ring);
  6195. DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
  6196. soc_cfg_ctx->num_tcl_data_rings);
  6197. DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
  6198. soc_cfg_ctx->per_pdev_rx_ring);
  6199. DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
  6200. soc_cfg_ctx->per_pdev_lmac_ring);
  6201. DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
  6202. soc_cfg_ctx->num_reo_dest_rings);
  6203. DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
  6204. soc_cfg_ctx->num_tx_desc_pool);
  6205. DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
  6206. soc_cfg_ctx->num_tx_ext_desc_pool);
  6207. DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
  6208. soc_cfg_ctx->num_tx_desc);
  6209. DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
  6210. soc_cfg_ctx->num_tx_ext_desc);
  6211. DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
  6212. soc_cfg_ctx->htt_packet_type);
  6213. DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
  6214. soc_cfg_ctx->max_peer_id);
  6215. DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
  6216. soc_cfg_ctx->tx_ring_size);
  6217. DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
  6218. soc_cfg_ctx->tx_comp_ring_size);
  6219. DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
  6220. soc_cfg_ctx->tx_comp_ring_size_nss);
  6221. DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
  6222. soc_cfg_ctx->int_batch_threshold_tx);
  6223. DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
  6224. soc_cfg_ctx->int_timer_threshold_tx);
  6225. DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
  6226. soc_cfg_ctx->int_batch_threshold_rx);
  6227. DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
  6228. soc_cfg_ctx->int_timer_threshold_rx);
  6229. DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
  6230. soc_cfg_ctx->int_batch_threshold_other);
  6231. DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
  6232. soc_cfg_ctx->int_timer_threshold_other);
  6233. for (i = 0; i < num_of_int_contexts; i++) {
  6234. index += qdf_snprint(&ring_mask[index],
  6235. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6236. " %d",
  6237. soc_cfg_ctx->int_tx_ring_mask[i]);
  6238. }
  6239. DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
  6240. num_of_int_contexts, ring_mask);
  6241. index = 0;
  6242. for (i = 0; i < num_of_int_contexts; i++) {
  6243. index += qdf_snprint(&ring_mask[index],
  6244. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6245. " %d",
  6246. soc_cfg_ctx->int_rx_ring_mask[i]);
  6247. }
  6248. DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
  6249. num_of_int_contexts, ring_mask);
  6250. index = 0;
  6251. for (i = 0; i < num_of_int_contexts; i++) {
  6252. index += qdf_snprint(&ring_mask[index],
  6253. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6254. " %d",
  6255. soc_cfg_ctx->int_rx_mon_ring_mask[i]);
  6256. }
  6257. DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
  6258. num_of_int_contexts, ring_mask);
  6259. index = 0;
  6260. for (i = 0; i < num_of_int_contexts; i++) {
  6261. index += qdf_snprint(&ring_mask[index],
  6262. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6263. " %d",
  6264. soc_cfg_ctx->int_rx_err_ring_mask[i]);
  6265. }
  6266. DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
  6267. num_of_int_contexts, ring_mask);
  6268. index = 0;
  6269. for (i = 0; i < num_of_int_contexts; i++) {
  6270. index += qdf_snprint(&ring_mask[index],
  6271. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6272. " %d",
  6273. soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
  6274. }
  6275. DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
  6276. num_of_int_contexts, ring_mask);
  6277. index = 0;
  6278. for (i = 0; i < num_of_int_contexts; i++) {
  6279. index += qdf_snprint(&ring_mask[index],
  6280. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6281. " %d",
  6282. soc_cfg_ctx->int_reo_status_ring_mask[i]);
  6283. }
  6284. DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
  6285. num_of_int_contexts, ring_mask);
  6286. index = 0;
  6287. for (i = 0; i < num_of_int_contexts; i++) {
  6288. index += qdf_snprint(&ring_mask[index],
  6289. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6290. " %d",
  6291. soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
  6292. }
  6293. DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
  6294. num_of_int_contexts, ring_mask);
  6295. index = 0;
  6296. for (i = 0; i < num_of_int_contexts; i++) {
  6297. index += qdf_snprint(&ring_mask[index],
  6298. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6299. " %d",
  6300. soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
  6301. }
  6302. DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
  6303. num_of_int_contexts, ring_mask);
  6304. DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
  6305. soc_cfg_ctx->rx_hash);
  6306. DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
  6307. soc_cfg_ctx->tso_enabled);
  6308. DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
  6309. soc_cfg_ctx->lro_enabled);
  6310. DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
  6311. soc_cfg_ctx->sg_enabled);
  6312. DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
  6313. soc_cfg_ctx->gro_enabled);
  6314. DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
  6315. soc_cfg_ctx->rawmode_enabled);
  6316. DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
  6317. soc_cfg_ctx->peer_flow_ctrl_enabled);
  6318. DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
  6319. soc_cfg_ctx->napi_enabled);
  6320. DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
  6321. soc_cfg_ctx->tcp_udp_checksumoffload);
  6322. DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
  6323. soc_cfg_ctx->defrag_timeout_check);
  6324. DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
  6325. soc_cfg_ctx->rx_defrag_min_timeout);
  6326. DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
  6327. soc_cfg_ctx->wbm_release_ring);
  6328. DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
  6329. soc_cfg_ctx->tcl_cmd_ring);
  6330. DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
  6331. soc_cfg_ctx->tcl_status_ring);
  6332. DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
  6333. soc_cfg_ctx->reo_reinject_ring);
  6334. DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
  6335. soc_cfg_ctx->rx_release_ring);
  6336. DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
  6337. soc_cfg_ctx->reo_exception_ring);
  6338. DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
  6339. soc_cfg_ctx->reo_cmd_ring);
  6340. DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
  6341. soc_cfg_ctx->reo_status_ring);
  6342. DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
  6343. soc_cfg_ctx->rxdma_refill_ring);
  6344. DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
  6345. soc_cfg_ctx->rxdma_err_dst_ring);
  6346. }
  6347. /**
6348. * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
6349. * @pdev: DP pdev handle
  6350. *
  6351. * Return - void
  6352. */
  6353. static void
  6354. dp_print_pdev_cfg_params(struct dp_pdev *pdev)
  6355. {
  6356. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  6357. if (!pdev) {
  6358. dp_err("Context is null");
  6359. return;
  6360. }
  6361. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  6362. if (!pdev_cfg_ctx) {
  6363. dp_err("Context is null");
  6364. return;
  6365. }
  6366. DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
  6367. pdev_cfg_ctx->rx_dma_buf_ring_size);
  6368. DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
  6369. pdev_cfg_ctx->dma_mon_buf_ring_size);
  6370. DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
  6371. pdev_cfg_ctx->dma_mon_dest_ring_size);
  6372. DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
  6373. pdev_cfg_ctx->dma_mon_status_ring_size);
  6374. DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
  6375. pdev_cfg_ctx->rxdma_monitor_desc_ring);
  6376. DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
  6377. pdev_cfg_ctx->num_mac_rings);
  6378. }
  6379. /**
  6380. * dp_txrx_stats_help() - Helper function for Txrx_Stats
  6381. *
  6382. * Return: None
  6383. */
  6384. static void dp_txrx_stats_help(void)
  6385. {
  6386. dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
  6387. dp_info("stats_option:");
  6388. dp_info(" 1 -- HTT Tx Statistics");
  6389. dp_info(" 2 -- HTT Rx Statistics");
  6390. dp_info(" 3 -- HTT Tx HW Queue Statistics");
  6391. dp_info(" 4 -- HTT Tx HW Sched Statistics");
  6392. dp_info(" 5 -- HTT Error Statistics");
  6393. dp_info(" 6 -- HTT TQM Statistics");
  6394. dp_info(" 7 -- HTT TQM CMDQ Statistics");
  6395. dp_info(" 8 -- HTT TX_DE_CMN Statistics");
  6396. dp_info(" 9 -- HTT Tx Rate Statistics");
  6397. dp_info(" 10 -- HTT Rx Rate Statistics");
  6398. dp_info(" 11 -- HTT Peer Statistics");
  6399. dp_info(" 12 -- HTT Tx SelfGen Statistics");
  6400. dp_info(" 13 -- HTT Tx MU HWQ Statistics");
  6401. dp_info(" 14 -- HTT RING_IF_INFO Statistics");
  6402. dp_info(" 15 -- HTT SRNG Statistics");
  6403. dp_info(" 16 -- HTT SFM Info Statistics");
  6404. dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
  6405. dp_info(" 18 -- HTT Peer List Details");
  6406. dp_info(" 20 -- Clear Host Statistics");
  6407. dp_info(" 21 -- Host Rx Rate Statistics");
  6408. dp_info(" 22 -- Host Tx Rate Statistics");
  6409. dp_info(" 23 -- Host Tx Statistics");
  6410. dp_info(" 24 -- Host Rx Statistics");
  6411. dp_info(" 25 -- Host AST Statistics");
  6412. dp_info(" 26 -- Host SRNG PTR Statistics");
  6413. dp_info(" 27 -- Host Mon Statistics");
  6414. dp_info(" 28 -- Host REO Queue Statistics");
  6415. dp_info(" 29 -- Host Soc cfg param Statistics");
  6416. dp_info(" 30 -- Host pdev cfg param Statistics");
  6417. }
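/*
 * Example (sketch): per the help text above, "iwpriv wlan0 txrx_stats 24 0"
 * requests option 24 (Host Rx Statistics) for mac_id 0, which
 * dp_print_host_stats() below serves via its TXRX_RX_HOST_STATS case; the
 * exact option-to-case mapping lives in dp_stats_mapping_table.
 */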
  6418. /**
  6419. * dp_print_host_stats()- Function to print the stats aggregated at host
  6420. * @vdev_handle: DP_VDEV handle
6421. * @req: stats request, carrying the requested host stats type
  6422. *
  6423. * Return: 0 on success, print error message in case of failure
  6424. */
  6425. static int
  6426. dp_print_host_stats(struct cdp_vdev *vdev_handle,
  6427. struct cdp_txrx_stats_req *req)
  6428. {
  6429. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6430. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  6431. enum cdp_host_txrx_stats type =
  6432. dp_stats_mapping_table[req->stats][STATS_HOST];
  6433. dp_aggregate_pdev_stats(pdev);
  6434. switch (type) {
  6435. case TXRX_CLEAR_STATS:
  6436. dp_txrx_host_stats_clr(vdev);
  6437. break;
  6438. case TXRX_RX_RATE_STATS:
  6439. dp_print_rx_rates(vdev);
  6440. break;
  6441. case TXRX_TX_RATE_STATS:
  6442. dp_print_tx_rates(vdev);
  6443. break;
  6444. case TXRX_TX_HOST_STATS:
  6445. dp_print_pdev_tx_stats(pdev);
  6446. dp_print_soc_tx_stats(pdev->soc);
  6447. break;
  6448. case TXRX_RX_HOST_STATS:
  6449. dp_print_pdev_rx_stats(pdev);
  6450. dp_print_soc_rx_stats(pdev->soc);
  6451. break;
  6452. case TXRX_AST_STATS:
  6453. dp_print_ast_stats(pdev->soc);
  6454. dp_print_peer_table(vdev);
  6455. break;
  6456. case TXRX_SRNG_PTR_STATS:
  6457. dp_print_ring_stats(pdev);
  6458. break;
  6459. case TXRX_RX_MON_STATS:
  6460. dp_print_pdev_rx_mon_stats(pdev);
  6461. break;
  6462. case TXRX_REO_QUEUE_STATS:
  6463. dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
  6464. break;
  6465. case TXRX_SOC_CFG_PARAMS:
  6466. dp_print_soc_cfg_params(pdev->soc);
  6467. break;
  6468. case TXRX_PDEV_CFG_PARAMS:
  6469. dp_print_pdev_cfg_params(pdev);
  6470. break;
  6471. default:
  6472. dp_info("Wrong Input For TxRx Host Stats");
  6473. dp_txrx_stats_help();
  6474. break;
  6475. }
  6476. return 0;
  6477. }
  6478. /*
  6479. * dp_ppdu_ring_reset()- Reset PPDU Stats ring
  6480. * @pdev: DP_PDEV handle
  6481. *
  6482. * Return: void
  6483. */
  6484. static void
  6485. dp_ppdu_ring_reset(struct dp_pdev *pdev)
  6486. {
  6487. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  6488. int mac_id;
  6489. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  6490. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6491. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6492. pdev->pdev_id);
  6493. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6494. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6495. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6496. }
  6497. }
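/*
 * Note: the zeroed htt_tlv_filter above clears every TLV subscription, so the
 * htt_h2t_rx_ring_cfg() call effectively unsubscribes the monitor status ring
 * from PPDU stats TLVs for each mac on this pdev.
 */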
  6498. /*
  6499. * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
  6500. * @pdev: DP_PDEV handle
  6501. *
  6502. * Return: void
  6503. */
  6504. static void
  6505. dp_ppdu_ring_cfg(struct dp_pdev *pdev)
  6506. {
  6507. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  6508. int mac_id;
  6509. htt_tlv_filter.mpdu_start = 1;
  6510. htt_tlv_filter.msdu_start = 0;
  6511. htt_tlv_filter.packet = 0;
  6512. htt_tlv_filter.msdu_end = 0;
  6513. htt_tlv_filter.mpdu_end = 0;
  6514. htt_tlv_filter.attention = 0;
  6515. htt_tlv_filter.ppdu_start = 1;
  6516. htt_tlv_filter.ppdu_end = 1;
  6517. htt_tlv_filter.ppdu_end_user_stats = 1;
  6518. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  6519. htt_tlv_filter.ppdu_end_status_done = 1;
  6520. htt_tlv_filter.enable_fp = 1;
  6521. htt_tlv_filter.enable_md = 0;
  6522. if (pdev->neighbour_peers_added &&
  6523. pdev->soc->hw_nac_monitor_support) {
  6524. htt_tlv_filter.enable_md = 1;
  6525. htt_tlv_filter.packet_header = 1;
  6526. }
  6527. if (pdev->mcopy_mode) {
  6528. htt_tlv_filter.packet_header = 1;
  6529. htt_tlv_filter.enable_mo = 1;
  6530. }
  6531. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  6532. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  6533. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  6534. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  6535. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  6536. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  6537. if (pdev->neighbour_peers_added &&
  6538. pdev->soc->hw_nac_monitor_support)
  6539. htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
  6540. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6541. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6542. pdev->pdev_id);
  6543. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6544. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6545. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6546. }
  6547. }
  6548. /*
 * is_ppdu_txrx_capture_enabled() - check whether any PPDU capture mode
 * (pktlog PPDU stats, tx sniffer or mcopy) is currently active
 * @pdev: dp pdev handle
 *
 * Return: true if none of the capture modes is enabled, false otherwise
  6554. */
  6555. static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
  6556. {
  6557. if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
  6558. !pdev->mcopy_mode)
  6559. return true;
  6560. else
  6561. return false;
  6562. }
  6563. /*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev_handle: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
  6569. */
  6570. static QDF_STATUS
  6571. dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
  6572. {
  6573. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
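/*
 * BPR shares the FW PPDU stats subscription with enhanced stats, tx sniffer,
 * mcopy and pktlog; the switch below picks the HTT stats config that matches
 * whichever consumers remain active after this change.
 */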
  6574. switch (val) {
  6575. case CDP_BPR_DISABLE:
  6576. pdev->bpr_enable = CDP_BPR_DISABLE;
  6577. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6578. !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
  6579. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6580. } else if (pdev->enhanced_stats_en &&
  6581. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6582. !pdev->pktlog_ppdu_stats) {
  6583. dp_h2t_cfg_stats_msg_send(pdev,
  6584. DP_PPDU_STATS_CFG_ENH_STATS,
  6585. pdev->pdev_id);
  6586. }
  6587. break;
  6588. case CDP_BPR_ENABLE:
  6589. pdev->bpr_enable = CDP_BPR_ENABLE;
  6590. if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
  6591. !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
  6592. dp_h2t_cfg_stats_msg_send(pdev,
  6593. DP_PPDU_STATS_CFG_BPR,
  6594. pdev->pdev_id);
  6595. } else if (pdev->enhanced_stats_en &&
  6596. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6597. !pdev->pktlog_ppdu_stats) {
  6598. dp_h2t_cfg_stats_msg_send(pdev,
  6599. DP_PPDU_STATS_CFG_BPR_ENH,
  6600. pdev->pdev_id);
  6601. } else if (pdev->pktlog_ppdu_stats) {
  6602. dp_h2t_cfg_stats_msg_send(pdev,
  6603. DP_PPDU_STATS_CFG_BPR_PKTLOG,
  6604. pdev->pdev_id);
  6605. }
  6606. break;
  6607. default:
  6608. break;
  6609. }
  6610. return QDF_STATUS_SUCCESS;
  6611. }
  6612. /*
  6613. * dp_config_debug_sniffer()- API to enable/disable debug sniffer
  6614. * @pdev_handle: DP_PDEV handle
  6615. * @val: user provided value
  6616. *
  6617. * Return: 0 for success. nonzero for failure.
  6618. */
  6619. static QDF_STATUS
  6620. dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
  6621. {
  6622. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6623. QDF_STATUS status = QDF_STATUS_SUCCESS;
  6624. if (pdev->mcopy_mode)
  6625. dp_reset_monitor_mode(pdev_handle);
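/* val: 0 - disable capture, 1 - tx sniffer mode, 2 - M-copy mode */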
  6626. switch (val) {
  6627. case 0:
  6628. pdev->tx_sniffer_enable = 0;
  6629. pdev->mcopy_mode = 0;
  6630. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6631. !pdev->bpr_enable) {
  6632. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6633. dp_ppdu_ring_reset(pdev);
  6634. } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
  6635. dp_h2t_cfg_stats_msg_send(pdev,
  6636. DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6637. } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
  6638. dp_h2t_cfg_stats_msg_send(pdev,
  6639. DP_PPDU_STATS_CFG_BPR_ENH,
  6640. pdev->pdev_id);
  6641. } else {
  6642. dp_h2t_cfg_stats_msg_send(pdev,
  6643. DP_PPDU_STATS_CFG_BPR,
  6644. pdev->pdev_id);
  6645. }
  6646. break;
  6647. case 1:
  6648. pdev->tx_sniffer_enable = 1;
  6649. pdev->mcopy_mode = 0;
  6650. if (!pdev->pktlog_ppdu_stats)
  6651. dp_h2t_cfg_stats_msg_send(pdev,
  6652. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6653. break;
  6654. case 2:
  6655. if (pdev->monitor_vdev) {
  6656. status = QDF_STATUS_E_RESOURCES;
  6657. break;
  6658. }
  6659. pdev->mcopy_mode = 1;
  6660. dp_pdev_configure_monitor_rings(pdev);
  6661. pdev->tx_sniffer_enable = 0;
  6662. if (!pdev->pktlog_ppdu_stats)
  6663. dp_h2t_cfg_stats_msg_send(pdev,
  6664. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6665. break;
  6666. default:
  6667. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6668. "Invalid value");
  6669. break;
  6670. }
  6671. return status;
  6672. }
  6673. /*
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
  6675. * @pdev_handle: DP_PDEV handle
  6676. *
  6677. * Return: void
  6678. */
  6679. static void
  6680. dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6681. {
  6682. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6683. if (pdev->enhanced_stats_en == 0)
  6684. dp_cal_client_timer_start(pdev->cal_client_ctx);
  6685. pdev->enhanced_stats_en = 1;
  6686. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6687. !pdev->monitor_vdev)
  6688. dp_ppdu_ring_cfg(pdev);
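/*
 * Ask FW for enhanced PPDU stats only when no other PPDU capture mode
 * (pktlog PPDU stats, tx sniffer, mcopy) already owns the HTT stats config.
 */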
  6689. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6690. dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6691. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6692. dp_h2t_cfg_stats_msg_send(pdev,
  6693. DP_PPDU_STATS_CFG_BPR_ENH,
  6694. pdev->pdev_id);
  6695. }
  6696. }
  6697. /*
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
  6699. * @pdev_handle: DP_PDEV handle
  6700. *
  6701. * Return: void
  6702. */
  6703. static void
  6704. dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6705. {
  6706. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6707. if (pdev->enhanced_stats_en == 1)
  6708. dp_cal_client_timer_stop(pdev->cal_client_ctx);
  6709. pdev->enhanced_stats_en = 0;
  6710. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6711. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6712. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6713. dp_h2t_cfg_stats_msg_send(pdev,
  6714. DP_PPDU_STATS_CFG_BPR,
  6715. pdev->pdev_id);
  6716. }
  6717. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6718. !pdev->monitor_vdev)
  6719. dp_ppdu_ring_reset(pdev);
  6720. }
  6721. /*
  6722. * dp_get_fw_peer_stats()- function to print peer stats
  6723. * @pdev_handle: DP_PDEV handle
  6724. * @mac_addr: mac address of the peer
  6725. * @cap: Type of htt stats requested
  6726. *
 * Currently supports MAC-address-based requests only. Supported @cap values:
  6728. * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
  6729. * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
  6730. * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
  6731. *
  6732. * Return: void
  6733. */
  6734. static void
  6735. dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
  6736. uint32_t cap)
  6737. {
  6738. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6739. int i;
  6740. uint32_t config_param0 = 0;
  6741. uint32_t config_param1 = 0;
  6742. uint32_t config_param2 = 0;
  6743. uint32_t config_param3 = 0;
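/*
 * Pack the request: config_param0 carries the "MAC addr valid" flag and the
 * requested stats mode bit, config_param1 requests every peer-stats TLV, and
 * the peer MAC address is split across config_param2 (bytes 0-3) and
 * config_param3 (bytes 4-5).
 */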
  6744. HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
  6745. config_param0 |= (1 << (cap + 1));
  6746. for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
  6747. config_param1 |= (1 << i);
  6748. }
  6749. config_param2 |= (mac_addr[0] & 0x000000ff);
  6750. config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
  6751. config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
  6752. config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
  6753. config_param3 |= (mac_addr[4] & 0x000000ff);
  6754. config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
  6755. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6756. config_param0, config_param1, config_param2,
  6757. config_param3, 0, 0, 0);
  6758. }
/* This struct definition will be removed from here
 * once it gets added to the FW headers */
  6761. struct httstats_cmd_req {
  6762. uint32_t config_param0;
  6763. uint32_t config_param1;
  6764. uint32_t config_param2;
  6765. uint32_t config_param3;
  6766. int cookie;
  6767. u_int8_t stats_id;
  6768. };
  6769. /*
 * dp_get_htt_stats: function to process the httstats request
  6771. * @pdev_handle: DP pdev handle
  6772. * @data: pointer to request data
  6773. * @data_len: length for request data
  6774. *
  6775. * return: void
  6776. */
  6777. static void
  6778. dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
  6779. {
  6780. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6781. struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
  6782. QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
  6783. dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
  6784. req->config_param0, req->config_param1,
  6785. req->config_param2, req->config_param3,
  6786. req->cookie, 0, 0);
  6787. }
  6788. /*
  6789. * dp_set_pdev_param: function to set parameters in pdev
  6790. * @pdev_handle: DP pdev handle
  6791. * @param: parameter type to be set
  6792. * @val: value of parameter to be set
  6793. *
  6794. * Return: 0 for success. nonzero for failure.
  6795. */
  6796. static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
  6797. enum cdp_pdev_param_type param,
  6798. uint8_t val)
  6799. {
  6800. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6801. switch (param) {
  6802. case CDP_CONFIG_DEBUG_SNIFFER:
  6803. return dp_config_debug_sniffer(pdev_handle, val);
  6804. case CDP_CONFIG_BPR_ENABLE:
  6805. return dp_set_bpr_enable(pdev_handle, val);
  6806. case CDP_CONFIG_PRIMARY_RADIO:
  6807. pdev->is_primary = val;
  6808. break;
  6809. default:
  6810. return QDF_STATUS_E_INVAL;
  6811. }
  6812. return QDF_STATUS_SUCCESS;
  6813. }
  6814. /*
 * dp_get_vdev_param: function to get parameters from vdev
 * @vdev_handle: DP_VDEV handle
 * @param: parameter type to get value
 *
 * return: parameter value
  6819. */
  6820. static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
  6821. enum cdp_vdev_param_type param)
  6822. {
  6823. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6824. uint32_t val;
  6825. switch (param) {
  6826. case CDP_ENABLE_WDS:
  6827. val = vdev->wds_enabled;
  6828. break;
  6829. case CDP_ENABLE_MEC:
  6830. val = vdev->mec_enabled;
  6831. break;
  6832. case CDP_ENABLE_DA_WAR:
  6833. val = vdev->da_war_enabled;
  6834. break;
  6835. default:
  6836. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6837. "param value %d is wrong\n",
  6838. param);
  6839. val = -1;
  6840. break;
  6841. }
  6842. return val;
  6843. }
  6844. /*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP_VDEV handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
  6848. *
  6849. * return: void
  6850. */
  6851. static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
  6852. enum cdp_vdev_param_type param, uint32_t val)
  6853. {
  6854. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6855. switch (param) {
  6856. case CDP_ENABLE_WDS:
  6857. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6858. "wds_enable %d for vdev(%p) id(%d)\n",
  6859. val, vdev, vdev->vdev_id);
  6860. vdev->wds_enabled = val;
  6861. break;
  6862. case CDP_ENABLE_MEC:
  6863. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6864. "mec_enable %d for vdev(%p) id(%d)\n",
  6865. val, vdev, vdev->vdev_id);
  6866. vdev->mec_enabled = val;
  6867. break;
  6868. case CDP_ENABLE_DA_WAR:
  6869. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6870. "da_war_enable %d for vdev(%p) id(%d)\n",
  6871. val, vdev, vdev->vdev_id);
  6872. vdev->da_war_enabled = val;
  6873. break;
  6874. case CDP_ENABLE_NAWDS:
  6875. vdev->nawds_enabled = val;
  6876. break;
  6877. case CDP_ENABLE_MCAST_EN:
  6878. vdev->mcast_enhancement_en = val;
  6879. break;
  6880. case CDP_ENABLE_PROXYSTA:
  6881. vdev->proxysta_vdev = val;
  6882. break;
  6883. case CDP_UPDATE_TDLS_FLAGS:
  6884. vdev->tdls_link_connected = val;
  6885. break;
  6886. case CDP_CFG_WDS_AGING_TIMER:
  6887. if (val == 0)
  6888. qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
  6889. else if (val != vdev->wds_aging_timer_val)
  6890. qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
  6891. vdev->wds_aging_timer_val = val;
  6892. break;
  6893. case CDP_ENABLE_AP_BRIDGE:
  6894. if (wlan_op_mode_sta != vdev->opmode)
  6895. vdev->ap_bridge_enabled = val;
  6896. else
  6897. vdev->ap_bridge_enabled = false;
  6898. break;
  6899. case CDP_ENABLE_CIPHER:
  6900. vdev->sec_type = val;
  6901. break;
  6902. case CDP_ENABLE_QWRAP_ISOLATION:
  6903. vdev->isolation_vdev = val;
  6904. break;
  6905. default:
  6906. break;
  6907. }
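/*
 * Several of the parameters above (e.g. WDS) can affect the vdev's TX
 * address search flags, so re-derive them after any update.
 */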
  6908. dp_tx_vdev_update_search_flags(vdev);
  6909. }
  6910. /**
  6911. * dp_peer_set_nawds: set nawds bit in peer
  6912. * @peer_handle: pointer to peer
  6913. * @value: enable/disable nawds
  6914. *
  6915. * return: void
  6916. */
  6917. static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
  6918. {
  6919. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6920. peer->nawds_enabled = value;
  6921. }
  6922. /*
  6923. * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
  6924. * @vdev_handle: DP_VDEV handle
  6925. * @map_id:ID of map that needs to be updated
  6926. *
  6927. * Return: void
  6928. */
  6929. static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
  6930. uint8_t map_id)
  6931. {
  6932. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6933. vdev->dscp_tid_map_id = map_id;
  6934. return;
  6935. }
  6936. /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
 * @pdev_handle: DP pdev handle
  6938. *
  6939. * return : cdp_pdev_stats pointer
  6940. */
  6941. static struct cdp_pdev_stats*
  6942. dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
  6943. {
  6944. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6945. dp_aggregate_pdev_stats(pdev);
  6946. return &pdev->stats;
  6947. }
  6948. /* dp_txrx_get_peer_stats - will return cdp_peer_stats
  6949. * @peer_handle: DP_PEER handle
  6950. *
  6951. * return : cdp_peer_stats pointer
  6952. */
  6953. static struct cdp_peer_stats*
  6954. dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
  6955. {
  6956. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6957. qdf_assert(peer);
  6958. return &peer->stats;
  6959. }
  6960. /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
  6961. * @peer_handle: DP_PEER handle
  6962. *
  6963. * return : void
  6964. */
  6965. static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
  6966. {
  6967. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6968. qdf_assert(peer);
  6969. qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
  6970. }
  6971. /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
  6972. * @vdev_handle: DP_VDEV handle
 * @buf: buffer for vdev stats
 * @is_aggregate: true to aggregate stats from all peers of the vdev,
 *                false to copy only the vdev's own stats
  6974. *
  6975. * return : int
  6976. */
  6977. static int dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
  6978. bool is_aggregate)
  6979. {
  6980. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6981. struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
  6982. if (is_aggregate)
  6983. dp_aggregate_vdev_stats(vdev, buf);
  6984. else
  6985. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  6986. return 0;
  6987. }
  6988. /*
 * dp_get_total_per(): get the total packet error rate (PER)
 * @pdev_handle: DP_PDEV handle
 *
 * Return: error rate in percent, computed from retries and successfully
 * transmitted packets
  6993. */
  6994. static int dp_get_total_per(struct cdp_pdev *pdev_handle)
  6995. {
  6996. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6997. dp_aggregate_pdev_stats(pdev);
  6998. if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
  6999. return 0;
  7000. return ((pdev->stats.tx.retries * 100) /
  7001. ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
  7002. }
  7003. /*
  7004. * dp_txrx_stats_publish(): publish pdev stats into a buffer
  7005. * @pdev_handle: DP_PDEV handle
  7006. * @buf: to hold pdev_stats
  7007. *
  7008. * Return: int
  7009. */
  7010. static int
  7011. dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
  7012. {
  7013. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7014. struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
  7015. struct cdp_txrx_stats_req req = {0,};
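/*
 * Trigger the FW pdev TX and RX stats (HTT ext stats) requests and give the
 * firmware time to respond before the pdev stats are copied into the
 * caller's buffer.
 */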
  7016. dp_aggregate_pdev_stats(pdev);
  7017. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
  7018. req.cookie_val = 1;
  7019. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7020. req.param1, req.param2, req.param3, 0,
  7021. req.cookie_val, 0);
  7022. msleep(DP_MAX_SLEEP_TIME);
  7023. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
  7024. req.cookie_val = 1;
  7025. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7026. req.param1, req.param2, req.param3, 0,
  7027. req.cookie_val, 0);
  7028. msleep(DP_MAX_SLEEP_TIME);
  7029. qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
  7030. return TXRX_STATS_LEVEL;
  7031. }
  7032. /**
  7033. * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @pdev_handle: DP_PDEV handle
  7035. * @map_id: ID of map that needs to be updated
  7036. * @tos: index value in map
  7037. * @tid: tid value passed by the user
  7038. *
  7039. * Return: void
  7040. */
  7041. static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
  7042. uint8_t map_id, uint8_t tos, uint8_t tid)
  7043. {
  7044. uint8_t dscp;
  7045. struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
  7046. struct dp_soc *soc = pdev->soc;
  7047. if (!soc)
  7048. return;
  7049. dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  7050. pdev->dscp_tid_map[map_id][dscp] = tid;
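/*
 * Program the HW DSCP-TID register only for map IDs that exist in hardware;
 * higher map IDs are maintained in the host table alone.
 */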
  7051. if (map_id < soc->num_hw_dscp_tid_map)
  7052. hal_tx_update_dscp_tid(soc->hal_soc, tid,
  7053. map_id, dscp);
  7054. return;
  7055. }
  7056. /**
  7057. * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
  7058. * @pdev_handle: pdev handle
  7059. * @val: hmmc-dscp flag value
  7060. *
  7061. * Return: void
  7062. */
  7063. static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
  7064. bool val)
  7065. {
  7066. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7067. pdev->hmmc_tid_override_en = val;
  7068. }
  7069. /**
  7070. * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
  7071. * @pdev_handle: pdev handle
  7072. * @tid: tid value
  7073. *
  7074. * Return: void
  7075. */
  7076. static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
  7077. uint8_t tid)
  7078. {
  7079. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7080. pdev->hmmc_tid = tid;
  7081. }
  7082. /**
  7083. * dp_fw_stats_process(): Process TxRX FW stats request
  7084. * @vdev_handle: DP VDEV handle
  7085. * @req: stats request
  7086. *
  7087. * return: int
  7088. */
  7089. static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
  7090. struct cdp_txrx_stats_req *req)
  7091. {
  7092. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7093. struct dp_pdev *pdev = NULL;
  7094. uint32_t stats = req->stats;
  7095. uint8_t mac_id = req->mac_id;
  7096. if (!vdev) {
  7097. DP_TRACE(NONE, "VDEV not found");
  7098. return 1;
  7099. }
  7100. pdev = vdev->pdev;
  7101. /*
 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
 * config_param0 to config_param3 to be set according to the rule below:
  7104. *
  7105. * PARAM:
  7106. * - config_param0 : start_offset (stats type)
  7107. * - config_param1 : stats bmask from start offset
  7108. * - config_param2 : stats bmask from start offset + 32
  7109. * - config_param3 : stats bmask from start offset + 64
  7110. */
  7111. if (req->stats == CDP_TXRX_STATS_0) {
  7112. req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
  7113. req->param1 = 0xFFFFFFFF;
  7114. req->param2 = 0xFFFFFFFF;
  7115. req->param3 = 0xFFFFFFFF;
  7116. } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
  7117. req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
  7118. }
  7119. return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
  7120. req->param1, req->param2, req->param3,
  7121. 0, 0, mac_id);
  7122. }
  7123. /**
  7124. * dp_txrx_stats_request - function to map to firmware and host stats
  7125. * @vdev: virtual handle
  7126. * @req: stats request
  7127. *
  7128. * Return: QDF_STATUS
  7129. */
  7130. static
  7131. QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
  7132. struct cdp_txrx_stats_req *req)
  7133. {
  7134. int host_stats;
  7135. int fw_stats;
  7136. enum cdp_stats stats;
  7137. int num_stats;
  7138. if (!vdev || !req) {
  7139. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7140. "Invalid vdev/req instance");
  7141. return QDF_STATUS_E_INVAL;
  7142. }
  7143. stats = req->stats;
  7144. if (stats >= CDP_TXRX_MAX_STATS)
  7145. return QDF_STATUS_E_INVAL;
  7146. /*
 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
 * it has to be updated whenever new FW HTT stats are added
  7149. */
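/*
 * Stats IDs above CDP_TXRX_STATS_HTT_MAX are not FW HTT stats; shift them
 * down so they index dp_stats_mapping_table right after the FW stats that
 * are currently available.
 */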
  7150. if (stats > CDP_TXRX_STATS_HTT_MAX)
  7151. stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
  7152. num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
  7153. if (stats >= num_stats) {
  7154. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7155. "%s: Invalid stats option: %d", __func__, stats);
  7156. return QDF_STATUS_E_INVAL;
  7157. }
  7158. req->stats = stats;
  7159. fw_stats = dp_stats_mapping_table[stats][STATS_FW];
  7160. host_stats = dp_stats_mapping_table[stats][STATS_HOST];
  7161. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7162. "stats: %u fw_stats_type: %d host_stats: %d",
  7163. stats, fw_stats, host_stats);
  7164. if (fw_stats != TXRX_FW_STATS_INVALID) {
  7165. /* update request with FW stats type */
  7166. req->stats = fw_stats;
  7167. return dp_fw_stats_process(vdev, req);
  7168. }
  7169. if ((host_stats != TXRX_HOST_STATS_INVALID) &&
  7170. (host_stats <= TXRX_HOST_STATS_MAX))
  7171. return dp_print_host_stats(vdev, req);
  7172. else
  7173. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7174. "Wrong Input for TxRx Stats");
  7175. return QDF_STATUS_SUCCESS;
  7176. }
  7177. /*
  7178. * dp_print_napi_stats(): NAPI stats
  7179. * @soc - soc handle
  7180. */
  7181. static void dp_print_napi_stats(struct dp_soc *soc)
  7182. {
  7183. hif_print_napi_stats(soc->hif_handle);
  7184. }
  7185. /*
  7186. * dp_print_per_ring_stats(): Packet count per ring
  7187. * @soc - soc handle
  7188. */
  7189. static void dp_print_per_ring_stats(struct dp_soc *soc)
  7190. {
  7191. uint8_t ring;
  7192. uint16_t core;
  7193. uint64_t total_packets;
  7194. DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
  7195. for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
  7196. total_packets = 0;
  7197. DP_TRACE_STATS(INFO_HIGH,
  7198. "Packets on ring %u:", ring);
  7199. for (core = 0; core < NR_CPUS; core++) {
  7200. DP_TRACE_STATS(INFO_HIGH,
  7201. "Packets arriving on core %u: %llu",
  7202. core,
  7203. soc->stats.rx.ring_packets[core][ring]);
  7204. total_packets += soc->stats.rx.ring_packets[core][ring];
  7205. }
  7206. DP_TRACE_STATS(INFO_HIGH,
  7207. "Total packets on ring %u: %llu",
  7208. ring, total_packets);
  7209. }
  7210. }
  7211. /*
 * dp_txrx_path_stats() - Function to display the TX/RX path statistics
  7213. * @soc - soc handle
  7214. *
  7215. * return: none
  7216. */
  7217. static void dp_txrx_path_stats(struct dp_soc *soc)
  7218. {
  7219. uint8_t error_code;
  7220. uint8_t loop_pdev;
  7221. struct dp_pdev *pdev;
  7222. uint8_t i;
  7223. if (!soc) {
  7224. DP_TRACE(ERROR, "%s: Invalid access",
  7225. __func__);
  7226. return;
  7227. }
  7228. for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
  7229. pdev = soc->pdev_list[loop_pdev];
  7230. dp_aggregate_pdev_stats(pdev);
  7231. DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
  7232. DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
  7233. pdev->stats.tx_i.rcvd.num,
  7234. pdev->stats.tx_i.rcvd.bytes);
  7235. DP_TRACE_STATS(INFO_HIGH,
  7236. "processed from host: %u msdus (%llu bytes)",
  7237. pdev->stats.tx_i.processed.num,
  7238. pdev->stats.tx_i.processed.bytes);
  7239. DP_TRACE_STATS(INFO_HIGH,
  7240. "successfully transmitted: %u msdus (%llu bytes)",
  7241. pdev->stats.tx.tx_success.num,
  7242. pdev->stats.tx.tx_success.bytes);
  7243. DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
  7244. DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
  7245. pdev->stats.tx_i.dropped.dropped_pkt.num);
  7246. DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
  7247. pdev->stats.tx_i.dropped.desc_na.num);
  7248. DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
  7249. pdev->stats.tx_i.dropped.ring_full);
  7250. DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
  7251. pdev->stats.tx_i.dropped.enqueue_fail);
  7252. DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
  7253. pdev->stats.tx_i.dropped.dma_error);
  7254. DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
  7255. DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
  7256. pdev->stats.tx.tx_failed);
  7257. DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
  7258. pdev->stats.tx.dropped.age_out);
  7259. DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
  7260. pdev->stats.tx.dropped.fw_rem.num);
  7261. DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
  7262. pdev->stats.tx.dropped.fw_rem.bytes);
  7263. DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
  7264. pdev->stats.tx.dropped.fw_rem_tx);
DP_TRACE_STATS(INFO_HIGH, "firmware removed notx: %u",
  7266. pdev->stats.tx.dropped.fw_rem_notx);
  7267. DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
  7268. pdev->soc->stats.tx.tx_invalid_peer.num);
  7269. DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
  7270. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7271. pdev->stats.tx_comp_histogram.pkts_1);
  7272. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7273. pdev->stats.tx_comp_histogram.pkts_2_20);
  7274. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7275. pdev->stats.tx_comp_histogram.pkts_21_40);
  7276. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7277. pdev->stats.tx_comp_histogram.pkts_41_60);
  7278. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7279. pdev->stats.tx_comp_histogram.pkts_61_80);
  7280. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7281. pdev->stats.tx_comp_histogram.pkts_81_100);
  7282. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7283. pdev->stats.tx_comp_histogram.pkts_101_200);
  7284. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7285. pdev->stats.tx_comp_histogram.pkts_201_plus);
  7286. DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
  7287. DP_TRACE_STATS(INFO_HIGH,
  7288. "delivered %u msdus ( %llu bytes),",
  7289. pdev->stats.rx.to_stack.num,
  7290. pdev->stats.rx.to_stack.bytes);
  7291. for (i = 0; i < CDP_MAX_RX_RINGS; i++)
  7292. DP_TRACE_STATS(INFO_HIGH,
  7293. "received on reo[%d] %u msdus( %llu bytes),",
  7294. i, pdev->stats.rx.rcvd_reo[i].num,
  7295. pdev->stats.rx.rcvd_reo[i].bytes);
  7296. DP_TRACE_STATS(INFO_HIGH,
  7297. "intra-bss packets %u msdus ( %llu bytes),",
  7298. pdev->stats.rx.intra_bss.pkts.num,
  7299. pdev->stats.rx.intra_bss.pkts.bytes);
  7300. DP_TRACE_STATS(INFO_HIGH,
  7301. "intra-bss fails %u msdus ( %llu bytes),",
  7302. pdev->stats.rx.intra_bss.fail.num,
  7303. pdev->stats.rx.intra_bss.fail.bytes);
  7304. DP_TRACE_STATS(INFO_HIGH,
  7305. "raw packets %u msdus ( %llu bytes),",
  7306. pdev->stats.rx.raw.num,
  7307. pdev->stats.rx.raw.bytes);
  7308. DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
  7309. pdev->stats.rx.err.mic_err);
  7310. DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
  7311. pdev->soc->stats.rx.err.rx_invalid_peer.num);
  7312. DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
  7313. DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
  7314. pdev->soc->stats.rx.err.invalid_rbm);
  7315. DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
  7316. pdev->soc->stats.rx.err.hal_ring_access_fail);
  7317. for (error_code = 0; error_code < HAL_REO_ERR_MAX;
  7318. error_code++) {
  7319. if (!pdev->soc->stats.rx.err.reo_error[error_code])
  7320. continue;
  7321. DP_TRACE_STATS(INFO_HIGH,
  7322. "Reo error number (%u): %u msdus",
  7323. error_code,
  7324. pdev->soc->stats.rx.err
  7325. .reo_error[error_code]);
  7326. }
  7327. for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
  7328. error_code++) {
  7329. if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
  7330. continue;
  7331. DP_TRACE_STATS(INFO_HIGH,
  7332. "Rxdma error number (%u): %u msdus",
  7333. error_code,
  7334. pdev->soc->stats.rx.err
  7335. .rxdma_error[error_code]);
  7336. }
  7337. DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
  7338. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7339. pdev->stats.rx_ind_histogram.pkts_1);
  7340. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7341. pdev->stats.rx_ind_histogram.pkts_2_20);
  7342. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7343. pdev->stats.rx_ind_histogram.pkts_21_40);
  7344. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7345. pdev->stats.rx_ind_histogram.pkts_41_60);
  7346. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7347. pdev->stats.rx_ind_histogram.pkts_61_80);
  7348. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7349. pdev->stats.rx_ind_histogram.pkts_81_100);
  7350. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7351. pdev->stats.rx_ind_histogram.pkts_101_200);
  7352. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7353. pdev->stats.rx_ind_histogram.pkts_201_plus);
  7354. DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
  7355. __func__,
  7356. pdev->soc->wlan_cfg_ctx
  7357. ->tso_enabled,
  7358. pdev->soc->wlan_cfg_ctx
  7359. ->lro_enabled,
  7360. pdev->soc->wlan_cfg_ctx
  7361. ->rx_hash,
  7362. pdev->soc->wlan_cfg_ctx
  7363. ->napi_enabled);
  7364. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7365. DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
  7366. __func__,
  7367. pdev->soc->wlan_cfg_ctx
  7368. ->tx_flow_stop_queue_threshold,
  7369. pdev->soc->wlan_cfg_ctx
  7370. ->tx_flow_start_queue_offset);
  7371. #endif
  7372. }
  7373. }
  7374. /*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc - soc handle
 * @value - Statistics option
 * @level - verbosity level
  7377. */
  7378. static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
  7379. enum qdf_stats_verbosity_level level)
  7380. {
  7381. struct dp_soc *soc =
  7382. (struct dp_soc *)psoc;
  7383. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7384. if (!soc) {
  7385. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7386. "%s: soc is NULL", __func__);
  7387. return QDF_STATUS_E_INVAL;
  7388. }
  7389. switch (value) {
  7390. case CDP_TXRX_PATH_STATS:
  7391. dp_txrx_path_stats(soc);
  7392. break;
  7393. case CDP_RX_RING_STATS:
  7394. dp_print_per_ring_stats(soc);
  7395. break;
  7396. case CDP_TXRX_TSO_STATS:
  7397. /* TODO: NOT IMPLEMENTED */
  7398. break;
  7399. case CDP_DUMP_TX_FLOW_POOL_INFO:
  7400. cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
  7401. break;
  7402. case CDP_DP_NAPI_STATS:
  7403. dp_print_napi_stats(soc);
  7404. break;
  7405. case CDP_TXRX_DESC_STATS:
  7406. /* TODO: NOT IMPLEMENTED */
  7407. break;
  7408. default:
  7409. status = QDF_STATUS_E_INVAL;
  7410. break;
  7411. }
  7412. return status;
  7413. }
  7414. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7415. /**
  7416. * dp_update_flow_control_parameters() - API to store datapath
  7417. * config parameters
  7418. * @soc: soc handle
 * @params: ini parameter handle
  7420. *
  7421. * Return: void
  7422. */
  7423. static inline
  7424. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7425. struct cdp_config_params *params)
  7426. {
  7427. soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
  7428. params->tx_flow_stop_queue_threshold;
  7429. soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
  7430. params->tx_flow_start_queue_offset;
  7431. }
  7432. #else
  7433. static inline
  7434. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7435. struct cdp_config_params *params)
  7436. {
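/* Flow control V2 is compiled out; nothing to store. */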
  7437. }
  7438. #endif
  7439. /**
  7440. * dp_update_config_parameters() - API to store datapath
  7441. * config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
  7444. *
  7445. * Return: status
  7446. */
  7447. static
  7448. QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
  7449. struct cdp_config_params *params)
  7450. {
  7451. struct dp_soc *soc = (struct dp_soc *)psoc;
  7452. if (!(soc)) {
  7453. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7454. "%s: Invalid handle", __func__);
  7455. return QDF_STATUS_E_INVAL;
  7456. }
  7457. soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
  7458. soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
  7459. soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
  7460. soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
  7461. params->tcp_udp_checksumoffload;
  7462. soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
  7463. soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
  7464. soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
  7465. dp_update_flow_control_parameters(soc, params);
  7466. return QDF_STATUS_SUCCESS;
  7467. }
  7468. /**
 * dp_txrx_set_wds_rx_policy() - API to set the WDS RX policy for a vdev
 * @vdev_handle - datapath vdev handle
 * @val - WDS RX policy flags (WDS_POLICY_RX_UCAST_4ADDR/MCAST_4ADDR)
 *
 * Return: void
  7475. */
  7476. #ifdef WDS_VENDOR_EXTENSION
  7477. void
  7478. dp_txrx_set_wds_rx_policy(
  7479. struct cdp_vdev *vdev_handle,
  7480. u_int32_t val)
  7481. {
  7482. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7483. struct dp_peer *peer;
  7484. if (vdev->opmode == wlan_op_mode_ap) {
  7485. /* for ap, set it on bss_peer */
  7486. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  7487. if (peer->bss_peer) {
  7488. peer->wds_ecm.wds_rx_filter = 1;
  7489. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7490. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7491. break;
  7492. }
  7493. }
  7494. } else if (vdev->opmode == wlan_op_mode_sta) {
  7495. peer = TAILQ_FIRST(&vdev->peer_list);
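/*
 * Note: assumes the STA vdev already has at least one peer in the list;
 * TAILQ_FIRST() is dereferenced without a NULL check.
 */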
  7496. peer->wds_ecm.wds_rx_filter = 1;
  7497. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7498. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7499. }
  7500. }
  7501. /**
  7502. * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
  7503. *
  7504. * @peer_handle - datapath peer handle
  7505. * @wds_tx_ucast: policy for unicast transmission
  7506. * @wds_tx_mcast: policy for multicast transmission
  7507. *
  7508. * Return: void
  7509. */
  7510. void
  7511. dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
  7512. int wds_tx_ucast, int wds_tx_mcast)
  7513. {
  7514. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7515. if (wds_tx_ucast || wds_tx_mcast) {
  7516. peer->wds_enabled = 1;
  7517. peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
  7518. peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
  7519. } else {
  7520. peer->wds_enabled = 0;
  7521. peer->wds_ecm.wds_tx_ucast_4addr = 0;
  7522. peer->wds_ecm.wds_tx_mcast_4addr = 0;
  7523. }
  7524. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7525. FL("Policy Update set to :\
  7526. peer->wds_enabled %d\
  7527. peer->wds_ecm.wds_tx_ucast_4addr %d\
  7528. peer->wds_ecm.wds_tx_mcast_4addr %d"),
  7529. peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
  7530. peer->wds_ecm.wds_tx_mcast_4addr);
  7531. return;
  7532. }
  7533. #endif
  7534. static struct cdp_wds_ops dp_ops_wds = {
  7535. .vdev_set_wds = dp_vdev_set_wds,
  7536. #ifdef WDS_VENDOR_EXTENSION
  7537. .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
  7538. .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
  7539. #endif
  7540. };
  7541. /*
  7542. * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
  7543. * @vdev_handle - datapath vdev handle
  7544. * @callback - callback function
  7545. * @ctxt: callback context
  7546. *
  7547. */
  7548. static void
  7549. dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
  7550. ol_txrx_data_tx_cb callback, void *ctxt)
  7551. {
  7552. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7553. vdev->tx_non_std_data_callback.func = callback;
  7554. vdev->tx_non_std_data_callback.ctxt = ctxt;
  7555. }
  7556. /**
  7557. * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
  7558. * @pdev_hdl: datapath pdev handle
  7559. *
  7560. * Return: opaque pointer to dp txrx handle
  7561. */
  7562. static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
  7563. {
  7564. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7565. return pdev->dp_txrx_handle;
  7566. }
  7567. /**
  7568. * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
  7569. * @pdev_hdl: datapath pdev handle
  7570. * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
  7571. *
  7572. * Return: void
  7573. */
  7574. static void
  7575. dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
  7576. {
  7577. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7578. pdev->dp_txrx_handle = dp_txrx_hdl;
  7579. }
  7580. /**
  7581. * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
  7582. * @soc_handle: datapath soc handle
  7583. *
  7584. * Return: opaque pointer to external dp (non-core DP)
  7585. */
  7586. static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
  7587. {
  7588. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7589. return soc->external_txrx_handle;
  7590. }
  7591. /**
  7592. * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
  7593. * @soc_handle: datapath soc handle
  7594. * @txrx_handle: opaque pointer to external dp (non-core DP)
  7595. *
  7596. * Return: void
  7597. */
  7598. static void
  7599. dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
  7600. {
  7601. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7602. soc->external_txrx_handle = txrx_handle;
  7603. }
  7604. /**
  7605. * dp_get_cfg_capabilities() - get dp capabilities
  7606. * @soc_handle: datapath soc handle
  7607. * @dp_caps: enum for dp capabilities
  7608. *
  7609. * Return: bool to determine if dp caps is enabled
  7610. */
  7611. static bool
  7612. dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
  7613. enum cdp_capabilities dp_caps)
  7614. {
  7615. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7616. return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
  7617. }
  7618. #ifdef FEATURE_AST
  7619. static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  7620. {
  7621. struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
  7622. struct dp_peer *peer = (struct dp_peer *) peer_hdl;
  7623. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7624. /*
 * For a BSS peer, a new peer is not created on alloc_node if a peer
 * with the same address already exists; instead the refcnt of the
 * existing peer is increased. Correspondingly, in the delete path only
 * the refcnt is decreased, and the peer is deleted only when all
 * references are released. So delete_in_progress should not be set
 * for bss_peer unless only 2 references remain (the peer map reference
 * and the peer hash table reference).
  7632. */
  7633. if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
  7634. return;
  7635. }
  7636. peer->delete_in_progress = true;
  7637. dp_peer_delete_ast_entries(soc, peer);
  7638. }
  7639. #endif
  7640. #ifdef ATH_SUPPORT_NAC_RSSI
  7641. /**
 * dp_vdev_get_neighbour_rssi(): Get the stored RSSI for a configured NAC
 * @vdev_hdl: DP vdev handle
 * @mac_addr: neighbour peer mac address
 * @rssi: buffer to return the rssi value
  7645. *
  7646. * Return: 0 for success. nonzero for failure.
  7647. */
  7648. QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
  7649. char *mac_addr,
  7650. uint8_t *rssi)
  7651. {
  7652. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  7653. struct dp_pdev *pdev = vdev->pdev;
  7654. struct dp_neighbour_peer *peer = NULL;
  7655. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  7656. *rssi = 0;
  7657. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  7658. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  7659. neighbour_peer_list_elem) {
  7660. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  7661. mac_addr, DP_MAC_ADDR_LEN) == 0) {
  7662. *rssi = peer->rssi;
  7663. status = QDF_STATUS_SUCCESS;
  7664. break;
  7665. }
  7666. }
  7667. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  7668. return status;
  7669. }
  7670. static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
  7671. enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
  7672. uint8_t chan_num)
  7673. {
  7674. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7675. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  7676. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7677. pdev->nac_rssi_filtering = 1;
  7678. /* Store address of NAC (neighbour peer) which will be checked
  7679. * against TA of received packets.
  7680. */
  7681. if (cmd == CDP_NAC_PARAM_ADD) {
  7682. dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
  7683. client_macaddr);
  7684. } else if (cmd == CDP_NAC_PARAM_DEL) {
  7685. dp_update_filter_neighbour_peers(vdev_handle,
  7686. DP_NAC_PARAM_DEL,
  7687. client_macaddr);
  7688. }
  7689. if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
  7690. soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
  7691. ((void *)vdev->pdev->ctrl_pdev,
  7692. vdev->vdev_id, cmd, bssid);
  7693. return QDF_STATUS_SUCCESS;
  7694. }
  7695. #endif
  7696. /**
  7697. * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
  7698. * for pktlog
 * @txrx_pdev_handle: cdp_pdev handle
 * @mac_addr: mac address of the peer
 * @enb_dsb: Enable or disable peer based filtering
  7701. *
  7702. * Return: QDF_STATUS
  7703. */
  7704. static int
  7705. dp_enable_peer_based_pktlog(
  7706. struct cdp_pdev *txrx_pdev_handle,
  7707. char *mac_addr, uint8_t enb_dsb)
  7708. {
  7709. struct dp_peer *peer;
  7710. uint8_t local_id;
  7711. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
  7712. peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
  7713. mac_addr, &local_id);
  7714. if (!peer) {
  7715. dp_err("Invalid Peer");
  7716. return QDF_STATUS_E_FAILURE;
  7717. }
  7718. peer->peer_based_pktlog_filter = enb_dsb;
  7719. pdev->dp_peer_based_pktlog = enb_dsb;
  7720. return QDF_STATUS_SUCCESS;
  7721. }
  7722. static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
  7723. uint32_t max_peers,
  7724. bool peer_map_unmap_v2)
  7725. {
  7726. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  7727. soc->max_peers = max_peers;
qdf_print("%s max_peers %u\n", __func__, max_peers);
  7729. if (dp_peer_find_attach(soc))
  7730. return QDF_STATUS_E_FAILURE;
  7731. soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
  7732. return QDF_STATUS_SUCCESS;
  7733. }
  7734. /**
  7735. * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
  7736. * @dp_pdev: dp pdev handle
  7737. * @ctrl_pdev: UMAC ctrl pdev handle
  7738. *
  7739. * Return: void
  7740. */
  7741. static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
  7742. struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
  7743. {
  7744. struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
  7745. pdev->ctrl_pdev = ctrl_pdev;
  7746. }
  7747. /*
  7748. * dp_get_cfg() - get dp cfg
  7749. * @soc: cdp soc handle
  7750. * @cfg: cfg enum
  7751. *
  7752. * Return: cfg value
  7753. */
  7754. static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
  7755. {
  7756. struct dp_soc *dpsoc = (struct dp_soc *)soc;
  7757. uint32_t value = 0;
  7758. switch (cfg) {
  7759. case cfg_dp_enable_data_stall:
  7760. value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
  7761. break;
  7762. case cfg_dp_enable_ip_tcp_udp_checksum_offload:
  7763. value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
  7764. break;
  7765. case cfg_dp_tso_enable:
  7766. value = dpsoc->wlan_cfg_ctx->tso_enabled;
  7767. break;
  7768. case cfg_dp_lro_enable:
  7769. value = dpsoc->wlan_cfg_ctx->lro_enabled;
  7770. break;
  7771. case cfg_dp_gro_enable:
  7772. value = dpsoc->wlan_cfg_ctx->gro_enabled;
  7773. break;
  7774. case cfg_dp_tx_flow_start_queue_offset:
  7775. value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
  7776. break;
  7777. case cfg_dp_tx_flow_stop_queue_threshold:
  7778. value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
  7779. break;
  7780. case cfg_dp_disable_intra_bss_fwd:
  7781. value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
  7782. break;
  7783. default:
  7784. value = 0;
  7785. }
  7786. return value;
  7787. }
  7788. static struct cdp_cmn_ops dp_ops_cmn = {
  7789. .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
  7790. .txrx_vdev_attach = dp_vdev_attach_wifi3,
  7791. .txrx_vdev_detach = dp_vdev_detach_wifi3,
  7792. .txrx_pdev_attach = dp_pdev_attach_wifi3,
  7793. .txrx_pdev_detach = dp_pdev_detach_wifi3,
  7794. .txrx_pdev_deinit = dp_pdev_deinit_wifi3,
  7795. .txrx_peer_create = dp_peer_create_wifi3,
  7796. .txrx_peer_setup = dp_peer_setup_wifi3,
  7797. #ifdef FEATURE_AST
  7798. .txrx_peer_teardown = dp_peer_teardown_wifi3,
  7799. #else
  7800. .txrx_peer_teardown = NULL,
  7801. #endif
  7802. .txrx_peer_add_ast = dp_peer_add_ast_wifi3,
  7803. .txrx_peer_del_ast = dp_peer_del_ast_wifi3,
  7804. .txrx_peer_update_ast = dp_peer_update_ast_wifi3,
  7805. .txrx_peer_ast_hash_find_soc = dp_peer_ast_hash_find_soc_wifi3,
  7806. .txrx_peer_ast_hash_find_by_pdevid =
  7807. dp_peer_ast_hash_find_by_pdevid_wifi3,
  7808. .txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
  7809. .txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
  7810. .txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
  7811. .txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
  7812. .txrx_peer_ast_get_peer = dp_peer_ast_get_peer_wifi3,
  7813. .txrx_peer_ast_get_nexthop_peer_id =
  7814. dp_peer_ast_get_nexhop_peer_id_wifi3,
  7815. #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
  7816. .txrx_peer_ast_set_cp_ctx = dp_peer_ast_set_cp_ctx_wifi3,
  7817. .txrx_peer_ast_get_cp_ctx = dp_peer_ast_get_cp_ctx_wifi3,
  7818. .txrx_peer_ast_get_wmi_sent = dp_peer_ast_get_wmi_sent_wifi3,
  7819. .txrx_peer_ast_free_entry = dp_peer_ast_free_entry_wifi3,
  7820. #endif
  7821. .txrx_peer_delete = dp_peer_delete_wifi3,
  7822. .txrx_vdev_register = dp_vdev_register_wifi3,
  7823. .txrx_soc_detach = dp_soc_detach_wifi3,
  7824. .txrx_soc_deinit = dp_soc_deinit_wifi3,
  7825. .txrx_soc_init = dp_soc_init_wifi3,
  7826. .txrx_tso_soc_attach = dp_tso_soc_attach,
  7827. .txrx_tso_soc_detach = dp_tso_soc_detach,
  7828. .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
  7829. .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
  7830. .txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
  7831. .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
  7832. .txrx_ath_getstats = dp_get_device_stats,
  7833. .addba_requestprocess = dp_addba_requestprocess_wifi3,
  7834. .addba_responsesetup = dp_addba_responsesetup_wifi3,
  7835. .addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
  7836. .delba_process = dp_delba_process_wifi3,
  7837. .set_addba_response = dp_set_addba_response,
  7838. .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
  7839. .flush_cache_rx_queue = NULL,
  7840. /* TODO: get API's for dscp-tid need to be added*/
  7841. .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
  7842. .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
  7843. .hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
  7844. .set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
  7845. .txrx_get_total_per = dp_get_total_per,
  7846. .txrx_stats_request = dp_txrx_stats_request,
  7847. .txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
  7848. .txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
  7849. .txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
  7850. .txrx_set_nac = dp_set_nac,
  7851. .txrx_get_tx_pending = dp_get_tx_pending,
  7852. .txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
  7853. .txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
  7854. .display_stats = dp_txrx_dump_stats,
  7855. .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
  7856. .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
  7857. .txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
  7858. .txrx_intr_detach = dp_soc_interrupt_detach,
  7859. .set_pn_check = dp_set_pn_check_wifi3,
  7860. .update_config_parameters = dp_update_config_parameters,
  7861. /* TODO: Add other functions */
  7862. .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
  7863. .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
  7864. .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
  7865. .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
  7866. .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
  7867. .txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
  7868. .txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
  7869. .tx_send = dp_tx_send,
  7870. .txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
  7871. .txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
  7872. .txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
  7873. .txrx_peer_map_attach = dp_peer_map_attach_wifi3,
  7874. .txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
  7875. .txrx_get_os_rx_handles_from_vdev =
  7876. dp_get_os_rx_handles_from_vdev_wifi3,
  7877. .delba_tx_completion = dp_delba_tx_completion_wifi3,
  7878. .get_dp_capabilities = dp_get_cfg_capabilities,
  7879. .txrx_get_cfg = dp_get_cfg,
  7880. };
  7881. static struct cdp_ctrl_ops dp_ops_ctrl = {
  7882. .txrx_peer_authorize = dp_peer_authorize,
  7883. .txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
  7884. .txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
  7885. #ifdef MESH_MODE_SUPPORT
  7886. .txrx_set_mesh_mode = dp_peer_set_mesh_mode,
  7887. .txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
  7888. #endif
  7889. .txrx_set_vdev_param = dp_set_vdev_param,
  7890. .txrx_peer_set_nawds = dp_peer_set_nawds,
  7891. .txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
  7892. .txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
  7893. .txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
  7894. .txrx_update_filter_neighbour_peers =
  7895. dp_update_filter_neighbour_peers,
  7896. .txrx_get_sec_type = dp_get_sec_type,
  7897. /* TODO: Add other functions */
  7898. .txrx_wdi_event_sub = dp_wdi_event_sub,
  7899. .txrx_wdi_event_unsub = dp_wdi_event_unsub,
  7900. #ifdef WDI_EVENT_ENABLE
  7901. .txrx_get_pldev = dp_get_pldev,
  7902. #endif
  7903. .txrx_set_pdev_param = dp_set_pdev_param,
  7904. #ifdef ATH_SUPPORT_NAC_RSSI
  7905. .txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
  7906. .txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
  7907. #endif
  7908. .set_key = dp_set_michael_key,
  7909. .txrx_get_vdev_param = dp_get_vdev_param,
  7910. .enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
  7911. };
  7912. static struct cdp_me_ops dp_ops_me = {
  7913. #ifdef ATH_SUPPORT_IQUE
  7914. .tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
  7915. .tx_me_free_descriptor = dp_tx_me_free_descriptor,
  7916. .tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
  7917. #endif
  7918. .tx_me_find_ast_entry = NULL,
  7919. };
  7920. static struct cdp_mon_ops dp_ops_mon = {
  7921. .txrx_monitor_set_filter_ucast_data = NULL,
  7922. .txrx_monitor_set_filter_mcast_data = NULL,
  7923. .txrx_monitor_set_filter_non_data = NULL,
  7924. .txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
  7925. .txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
  7926. .txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
  7927. .txrx_reset_monitor_mode = dp_reset_monitor_mode,
  7928. /* Added support for HK advance filter */
  7929. .txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
  7930. };
  7931. static struct cdp_host_stats_ops dp_ops_host_stats = {
  7932. .txrx_per_peer_stats = dp_get_host_peer_stats,
  7933. .get_fw_peer_stats = dp_get_fw_peer_stats,
  7934. .get_htt_stats = dp_get_htt_stats,
  7935. .txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
  7936. .txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
  7937. .txrx_stats_publish = dp_txrx_stats_publish,
  7938. .txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
  7939. .txrx_get_peer_stats = dp_txrx_get_peer_stats,
  7940. .txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
  7941. .txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
  7942. /* TODO */
  7943. };
  7944. static struct cdp_raw_ops dp_ops_raw = {
  7945. /* TODO */
  7946. };
  7947. #ifdef CONFIG_WIN
  7948. static struct cdp_pflow_ops dp_ops_pflow = {
  7949. /* TODO */
  7950. };
  7951. #endif /* CONFIG_WIN */
  7952. #ifdef FEATURE_RUNTIME_PM
  7953. /**
  7954. * dp_runtime_suspend() - ensure DP is ready to runtime suspend
  7955. * @opaque_pdev: DP pdev context
  7956. *
  7957. * DP is ready to runtime suspend if there are no pending TX packets.
  7958. *
  7959. * Return: QDF_STATUS
  7960. */
  7961. static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
  7962. {
  7963. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  7964. struct dp_soc *soc = pdev->soc;
  7965. /* Abort if there are any pending TX packets */
  7966. if (dp_get_tx_pending(opaque_pdev) > 0) {
  7967. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7968. FL("Abort suspend due to pending TX packets"));
  7969. return QDF_STATUS_E_AGAIN;
  7970. }
  7971. if (soc->intr_mode == DP_INTR_POLL)
  7972. qdf_timer_stop(&soc->int_timer);
  7973. return QDF_STATUS_SUCCESS;
  7974. }
  7975. /**
  7976. * dp_runtime_resume() - ensure DP is ready to runtime resume
  7977. * @opaque_pdev: DP pdev context
  7978. *
  7979. * Resume DP for runtime PM.
  7980. *
  7981. * Return: QDF_STATUS
  7982. */
  7983. static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
  7984. {
  7985. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  7986. struct dp_soc *soc = pdev->soc;
  7987. void *hal_srng;
  7988. int i;
  7989. if (soc->intr_mode == DP_INTR_POLL)
  7990. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  7991. for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
  7992. hal_srng = soc->tcl_data_ring[i].hal_srng;
  7993. if (hal_srng) {
  7994. /* We actually only need to acquire the lock */
  7995. hal_srng_access_start(soc->hal_soc, hal_srng);
  7996. /* Update SRC ring head pointer for HW to send
  7997. all pending packets */
  7998. hal_srng_access_end(soc->hal_soc, hal_srng);
  7999. }
  8000. }
  8001. return QDF_STATUS_SUCCESS;
  8002. }
  8003. #endif /* FEATURE_RUNTIME_PM */
static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	return QDF_STATUS_SUCCESS;
}

#ifndef CONFIG_WIN
static struct cdp_misc_ops dp_ops_misc = {
	.tx_non_std = dp_tx_non_std,
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
};

static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

#ifdef IPA_OFFLOAD
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level
};
#endif

static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume
};

static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
/*
 * dp_peer_get_ref_find_by_addr - get a reference to the peer with the given
 * MAC address (increments the peer reference count)
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @local_id: local id for the peer
 * @debug_id: enum used to track the peer access
 *
 * Return: peer instance pointer
 */
static inline void *
dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
			     uint8_t *local_id,
			     enum peer_debug_id_type debug_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	return peer;
}

/*
 * dp_peer_release_ref - release a reference on the peer
 * @peer: peer handle
 * @debug_id: enum used to track the peer access
 *
 * Return: None
 */
static inline
void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
{
	dp_peer_unref_delete(peer);
}
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
	.peer_release_ref = dp_peer_release_ref,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
};
#endif
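/*
 * Illustrative sketch (not part of the driver): a lookup made through
 * peer_get_ref_by_addr holds a peer reference that must be dropped with
 * peer_release_ref once the caller is done with the peer.  The 'txrx_pdev',
 * 'mac' and 'dbg_id' names below are hypothetical placeholders for values
 * the caller already has:
 *
 *	uint8_t local_id;
 *	void *peer;
 *
 *	peer = dp_peer_get_ref_find_by_addr(txrx_pdev, mac, &local_id, dbg_id);
 *	if (peer) {
 *		... use the peer while the reference is held ...
 *		dp_peer_release_ref(peer, dbg_id);
 *	}
 */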
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
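/*
 * Illustrative sketch (not part of the driver): once dp_soc_attach() has
 * registered &dp_txrx_ops in soc->cdp_soc.ops, upper layers reach the
 * datapath only through this table.  Assuming a caller that already holds
 * the converged soc handle and a pdev handle (the 'cdp_soc' and 'cdp_pdev'
 * names below are hypothetical), a bus suspend request would be dispatched
 * as:
 *
 *	QDF_STATUS status = QDF_STATUS_E_NOSUPPORT;
 *
 *	if (cdp_soc->ops->bus_ops && cdp_soc->ops->bus_ops->bus_suspend)
 *		status = cdp_soc->ops->bus_ops->bus_suspend(cdp_pdev);
 */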
/*
 * dp_soc_set_txrx_ring_map() - set the default (non-NSS-offload) TCL CPU
 *				ring map for the SoC
 * @soc: DP soc handle
 *
 * Return: Void
 */
static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
{
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
	}
}
#ifdef QCA_WIFI_QCA8074
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);
	if (!dp_soc)
		return NULL;

	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
		return NULL;

	return (void *)dp_soc;
}
#else
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);

	return (void *)dp_soc;
}
#endif
/**
 * dp_soc_attach() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id)
{
	int int_ctx;
	struct dp_soc *soc = NULL;
	struct htt_soc *htt_soc = NULL;

	soc = qdf_mem_malloc(sizeof(*soc));
	if (!soc) {
		dp_err("DP SOC memory allocation failed");
		goto fail0;
	}

	int_ctx = 0;
	soc->device_id = device_id;
	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed\n");
		goto fail1;
	}

	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
	if (!htt_soc) {
		dp_err("HTT attach failed");
		goto fail1;
	}
	soc->htt_handle = htt_soc;
	htt_soc->dp_soc = soc;
	htt_soc->htc_soc = htc_handle;

	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	return (void *)soc;
fail2:
	qdf_mem_free(htt_soc);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
/**
 * dp_soc_init() - Initialize txrx SOC
 * @dp_soc: Opaque DP SOC handle
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)dpsoc;
	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;

	htt_soc->htc_soc = htc_handle;
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		return NULL;

	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
			   soc->hal_soc, soc->osdev);
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		break;
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
#endif
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
								CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL) {
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
		}

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
							    CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);

	qdf_spinlock_create(&soc->ast_lock);
	dp_soc_wds_attach(soc);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map */
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return soc;
}
/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @dp_soc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (Unused)
 * @ol_ops: Offload Operations (Unused)
 * @device_id: Device ID (Unused)
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	return dp_soc_init(dpsoc, htc_handle, hif_handle);
}
#endif
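/*
 * Illustrative sketch (not part of the driver): when QCA_MEM_ATTACH_ON_WIFI3
 * is defined, dp_soc_attach_wifi3() only allocates the SoC, so a hypothetical
 * platform glue layer would presumably perform the two-step bring-up itself,
 * using the same arguments for both calls:
 *
 *	void *soc;
 *
 *	soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				  qdf_osdev, ol_ops, device_id);
 *	if (soc)
 *		soc = dp_soc_init_wifi3(soc, ctrl_psoc, hif_handle, htc_handle,
 *					qdf_osdev, ol_ops, device_id);
 */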
/*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to the MAC id
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return soc->pdev_list[mac_id];

	/* Typically for MCL, as there is only one pdev */
	return soc->pdev_list[0];
}
/*
 * dp_is_hw_dbs_enable() - Check if the hardware is DBS 2x2 capable
 * @soc: DP SoC context
 * @max_mac_rings: in/out number of MAC rings; clamped to 1 when DBS is not
 *		   supported
 *
 * Return: None
 */
static
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings)
{
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable(soc->ctrl_psoc);

	*max_mac_rings = (dbs_enable) ? (*max_mac_rings) : 1;
}
/*
 * dp_set_pktlog_wifi3() - subscribe to or unsubscribe from a pktlog WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Return: 0 on success
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	soc = pdev->soc;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE,
					 &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE_PKTLOG_LITE,
					 &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id,	pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE,
					 &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* Pass value 0 to disable
			 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW; switch to
			 * the proper macros once they are defined in the htt
			 * header file.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);

				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}

	return 0;
}
#endif
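/*
 * Illustrative usage (sketch only): a pktlog control path holding the
 * struct dp_pdev handle (the 'txrx_pdev' name below is hypothetical) could
 * subscribe to and later unsubscribe from lite T2H PPDU stats as:
 *
 *	dp_set_pktlog_wifi3(txrx_pdev, WDI_EVENT_LITE_T2H, true);
 *	...
 *	dp_set_pktlog_wifi3(txrx_pdev, WDI_EVENT_LITE_T2H, false);
 */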