dp_main.c

/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "qdf_mem.h"	/* qdf_mem_malloc, qdf_mem_free */
#include "cfg_ucfg_api.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
#endif
#include "dp_ipa.h"
#include "dp_cal_client_api.h"
#ifdef CONFIG_MCL
extern int con_mode_monitor;
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
				  uint8_t *peer_mac_addr,
				  struct cdp_ctrl_objmgr_peer *ctrl_peer);
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
#define DP_INTR_POLL_TIMER_MS 10

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS 1000

/* WDS AST entry aging timer value */
#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS 120000
#define DP_WDS_AST_AGING_TIMER_CNT \
	((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
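
/*
 * With the defaults above this evaluates to (120000 / 1000) - 1 = 119:
 * a WDS AST entry ages out after 119 expirations of the 1000 ms generic
 * AST aging timer.
 */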
#define DP_MCS_LENGTH (6*MAX_MCS)
#define DP_NSS_LENGTH (6*SS_COUNT)
#define DP_MU_GROUP_SHOW 16
#define DP_MU_GROUP_LENGTH (6 * DP_MU_GROUP_SHOW)
#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
#define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
#define DP_MAX_MCS_STRING_LEN 30
#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif
#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL 0xb
#define RX_RING_MASK_VAL 0x7
#else
#define TX_RING_MASK_VAL 0xF
#define RX_RING_MASK_VAL 0xF
#endif
#define STR_MAXLEN 64

#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR 0x2000
/* PPDU stats mask sent to FW to support BPR and enhanced stats features */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats features */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
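
/*
 * The combined masks are plain bitwise ORs of the individual masks,
 * e.g. DP_PPDU_STATS_CFG_BPR_ENH = 0x2000 | 0xE67 = 0x2E67, so one
 * HTT config message can enable BPR and enhanced stats together.
 */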
#define RNG_ERR		"SRNG setup failed for"

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
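
/*
 * A lookup into this table is indexed by the six DSCP bits of the IP
 * TOS byte. A sketch ('tos' here is a hypothetical input, not a field
 * of this driver):
 *
 *	uint8_t dscp = (tos >> 2) & 0x3f;
 *	uint8_t tid = default_dscp_tid_map[dscp];
 *
 * For example, DSCP 46 (EF, TOS 0xB8) falls in the 101xxx block and
 * maps to TID 5.
 */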
/*
 * struct dp_rate_debug - rate statistics print strings
 *
 * @mcs_type: print string for a given mcs
 * @valid: whether the mcs rate is valid
 */
struct dp_rate_debug {
	char mcs_type[DP_MAX_MCS_STRING_LEN];
	uint8_t valid;
};

#define MCS_VALID 1
#define MCS_INVALID 0
static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
	{
		{"OFDM 48 Mbps", MCS_VALID},
		{"OFDM 24 Mbps", MCS_VALID},
		{"OFDM 12 Mbps", MCS_VALID},
		{"OFDM 6 Mbps ", MCS_VALID},
		{"OFDM 54 Mbps", MCS_VALID},
		{"OFDM 36 Mbps", MCS_VALID},
		{"OFDM 18 Mbps", MCS_VALID},
		{"OFDM 9 Mbps ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{
		{"CCK 11 Mbps Long  ", MCS_VALID},
		{"CCK 5.5 Mbps Long ", MCS_VALID},
		{"CCK 2 Mbps Long   ", MCS_VALID},
		{"CCK 1 Mbps Long   ", MCS_VALID},
		{"CCK 11 Mbps Short ", MCS_VALID},
		{"CCK 5.5 Mbps Short", MCS_VALID},
		{"CCK 2 Mbps Short  ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{
		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{
		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	},
	{
		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	}
};
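
/*
 * Rows are indexed by preamble/packet type and columns by MCS, so a
 * stats printer can do something like the following sketch (DOT11_AC
 * is assumed from cdp_txrx_cmn_struct.h; 'mcs' is a hypothetical
 * index already range-checked against MAX_MCS):
 *
 *	if (dp_rate_string[DOT11_AC][mcs].valid)
 *		DP_PRINT_STATS("%s", dp_rate_string[DOT11_AC][mcs].mcs_type);
 *
 * e.g. DOT11_AC with mcs 9 would print "VHT MCS 9 (256-QAM 5/6)".
 */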
/**
 * enum dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only first radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
	DP_NSS_DEFAULT_MAP,
	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
	DP_NSS_DBDC_OFFLOADED_MAP,
	DP_NSS_DBTC_OFFLOADED_MAP,
	DP_NSS_CPU_RING_MAP_MAX
};
/**
 * @brief Cpu to tx ring map
 */
#ifdef CONFIG_WIN
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#else
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#endif
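
/*
 * Each row corresponds to one enum dp_cpu_ring_map_types value and
 * each column to one interrupt/CPU context; the stored value selects
 * the TCL ring used for transmits from that context. A sketch of the
 * indexing ('cpu_ctx' is a hypothetical context index):
 *
 *	ring_id = dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][cpu_ctx];
 *
 * In the DBDC offload row every host context resolves to ring 2,
 * keeping the remaining rings free for the NSS offload path.
 */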
/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID = -1,
};
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
};
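
/*
 * A caller resolves a generic stats id into its firmware and host
 * variants by column. A sketch ('stats_id' is a hypothetical index,
 * assumed already validated against the table size):
 *
 *	int fw_stats = dp_stats_mapping_table[stats_id][STATS_FW];
 *	int host_stats = dp_stats_mapping_table[stats_id][STATS_HOST];
 *
 * TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID mark ids that have
 * no counterpart on that side.
 */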
/* MCL specific functions */
#ifdef CONFIG_MCL
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0, since in interrupt mode (softirq based RX)
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, when packet log is enabled for SAP/STA/P2P modes, regular
 * interrupt processing will not process monitor mode rings; that is
 * done in a separate timer context.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
/*
 * dp_service_mon_rings() - timer handler to reap monitor rings,
 * required as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_service_mon_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, work_done, mac_id;
	struct dp_pdev *pdev = NULL;

	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								  pdev->pdev_id);

			work_done = dp_mon_process(soc, mac_for_pdev,
						   QCA_NAPI_BUDGET);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("Reaped %d descs from Monitor rings"),
				  work_done);
		}
	}
	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
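
/*
 * The qdf_timer_mod() call above re-arms the timer at the end of each
 * run, so the monitor rings are reaped every DP_INTR_POLL_TIMER_MS
 * (10 ms) for as long as soc->mon_reap_timer stays armed; this polling
 * substitutes for the missing ppdu end interrupts noted in the
 * function header.
 */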
#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @ppdev: physical device handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
	struct dp_pdev *handle = (struct dp_pdev *)ppdev;

	if (handle->pkt_log_init) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Packet log already initialized", __func__);
		return;
	}

	pktlog_sethandle(&handle->pl_dev, scn);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		handle->pkt_log_init = false;
	} else {
		handle->pkt_log_init = true;
	}
}
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
	pktlog_htc_attach();
}
/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int i;
	int num_rx_contexts = 0;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
			num_rx_contexts++;

	return num_rx_contexts;
}
/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
	void *scn = (void *)handle->soc->hif_handle;

	if (!scn) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid hif(scn) handle", __func__);
		return;
	}

	pktlogmod_exit(scn);
	handle->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#endif
/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * Return: pointer to dp_vdev
 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
	return (struct dp_vdev *)cdp_opaque_vdev;
}
static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
				 struct cdp_peer *peer_hdl,
				 uint8_t *mac_addr,
				 enum cdp_txrx_ast_entry_type type,
				 uint32_t flags)
{
	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
			       (struct dp_peer *)peer_hdl,
			       mac_addr,
			       type,
			       flags);
}
  488. static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
  489. struct cdp_peer *peer_hdl,
  490. uint8_t *wds_macaddr,
  491. uint32_t flags)
  492. {
  493. int status = -1;
  494. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  495. struct dp_ast_entry *ast_entry = NULL;
  496. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  497. qdf_spin_lock_bh(&soc->ast_lock);
  498. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
  499. peer->vdev->pdev->pdev_id);
  500. if (ast_entry) {
  501. status = dp_peer_update_ast(soc,
  502. peer,
  503. ast_entry, flags);
  504. }
  505. qdf_spin_unlock_bh(&soc->ast_lock);
  506. return status;
  507. }
  508. /*
  509. * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
510. * @soc_hdl: Datapath SOC handle
  511. * @wds_macaddr: WDS entry MAC Address
  512. * Return: None
  513. */
  514. static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
  515. uint8_t *wds_macaddr, void *vdev_handle)
  516. {
  517. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  518. struct dp_ast_entry *ast_entry = NULL;
  519. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  520. qdf_spin_lock_bh(&soc->ast_lock);
  521. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
  522. vdev->pdev->pdev_id);
  523. if (ast_entry) {
  524. if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
  525. (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
  526. (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
  527. ast_entry->is_active = TRUE;
  528. }
  529. }
  530. qdf_spin_unlock_bh(&soc->ast_lock);
  531. }
  532. /*
  533. * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
534. * @soc_hdl: Datapath SOC handle
  535. *
  536. * Return: None
  537. */
  538. static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
  539. void *vdev_hdl)
  540. {
  541. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  542. struct dp_pdev *pdev;
  543. struct dp_vdev *vdev;
  544. struct dp_peer *peer;
  545. struct dp_ast_entry *ase, *temp_ase;
  546. int i;
  547. qdf_spin_lock_bh(&soc->ast_lock);
  548. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  549. pdev = soc->pdev_list[i];
  550. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  551. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  552. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  553. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  554. if ((ase->type ==
  555. CDP_TXRX_AST_TYPE_STATIC) ||
  556. (ase->type ==
  557. CDP_TXRX_AST_TYPE_SELF) ||
  558. (ase->type ==
  559. CDP_TXRX_AST_TYPE_STA_BSS))
  560. continue;
  561. ase->is_active = TRUE;
  562. }
  563. }
  564. }
  565. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  566. }
  567. qdf_spin_unlock_bh(&soc->ast_lock);
  568. }
  569. /*
  570. * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
571. * @soc_hdl: Datapath SOC handle
  572. *
  573. * Return: None
  574. */
  575. static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
  576. {
  577. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  578. struct dp_pdev *pdev;
  579. struct dp_vdev *vdev;
  580. struct dp_peer *peer;
  581. struct dp_ast_entry *ase, *temp_ase;
  582. int i;
  583. qdf_spin_lock_bh(&soc->ast_lock);
  584. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  585. pdev = soc->pdev_list[i];
  586. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  587. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  588. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  589. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  590. if ((ase->type ==
  591. CDP_TXRX_AST_TYPE_STATIC) ||
  592. (ase->type ==
  593. CDP_TXRX_AST_TYPE_SELF) ||
  594. (ase->type ==
  595. CDP_TXRX_AST_TYPE_STA_BSS))
  596. continue;
  597. dp_peer_del_ast(soc, ase);
  598. }
  599. }
  600. }
  601. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  602. }
  603. qdf_spin_unlock_bh(&soc->ast_lock);
  604. }
  605. /**
  606. * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
  607. * and return ast entry information
  608. * of first ast entry found in the
  609. * table with given mac address
  610. *
  611. * @soc : data path soc handle
  612. * @ast_mac_addr : AST entry mac address
  613. * @ast_entry_info : ast entry information
  614. *
  615. * return : true if ast entry found with ast_mac_addr
  616. * false if ast entry not found
  617. */
  618. static bool dp_peer_get_ast_info_by_soc_wifi3
  619. (struct cdp_soc_t *soc_hdl,
  620. uint8_t *ast_mac_addr,
  621. struct cdp_ast_entry_info *ast_entry_info)
  622. {
  623. struct dp_ast_entry *ast_entry;
  624. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  625. qdf_spin_lock_bh(&soc->ast_lock);
  626. ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
  627. if (ast_entry && !ast_entry->delete_in_progress) {
  628. ast_entry_info->type = ast_entry->type;
  629. ast_entry_info->pdev_id = ast_entry->pdev_id;
  630. ast_entry_info->vdev_id = ast_entry->vdev_id;
  631. ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
  632. qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
  633. &ast_entry->peer->mac_addr.raw[0],
  634. DP_MAC_ADDR_LEN);
  635. qdf_spin_unlock_bh(&soc->ast_lock);
  636. return true;
  637. }
  638. qdf_spin_unlock_bh(&soc->ast_lock);
  639. return false;
  640. }
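/*
 * Illustrative caller sketch (not part of the driver; the helper name and
 * flow are assumptions, only the lookup routine above is real): look up an
 * AST entry by MAC address and consume the returned entry information.
 */
#if 0
static void dp_ast_lookup_example(struct cdp_soc_t *soc_hdl, uint8_t *mac)
{
	struct cdp_ast_entry_info info = {0};

	if (dp_peer_get_ast_info_by_soc_wifi3(soc_hdl, mac, &info)) {
		/* info.peer_mac_addr, info.pdev_id, info.vdev_id and
		 * info.type are valid only when true is returned.
		 */
	}
}
#endif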
  641. /**
  642. * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
  643. * and return ast entry information
  644. * if mac address and pdev_id matches
  645. *
  646. * @soc : data path soc handle
  647. * @ast_mac_addr : AST entry mac address
  648. * @pdev_id : pdev_id
  649. * @ast_entry_info : ast entry information
  650. *
  651. * return : true if ast entry found with ast_mac_addr
  652. * false if ast entry not found
  653. */
  654. static bool dp_peer_get_ast_info_by_pdevid_wifi3
  655. (struct cdp_soc_t *soc_hdl,
  656. uint8_t *ast_mac_addr,
  657. uint8_t pdev_id,
  658. struct cdp_ast_entry_info *ast_entry_info)
  659. {
  660. struct dp_ast_entry *ast_entry;
  661. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  662. qdf_spin_lock_bh(&soc->ast_lock);
  663. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
  664. if (ast_entry && !ast_entry->delete_in_progress) {
  665. ast_entry_info->type = ast_entry->type;
  666. ast_entry_info->pdev_id = ast_entry->pdev_id;
  667. ast_entry_info->vdev_id = ast_entry->vdev_id;
  668. ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
  669. qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
  670. &ast_entry->peer->mac_addr.raw[0],
  671. DP_MAC_ADDR_LEN);
  672. qdf_spin_unlock_bh(&soc->ast_lock);
  673. return true;
  674. }
  675. qdf_spin_unlock_bh(&soc->ast_lock);
  676. return false;
  677. }
  678. /**
  679. * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
  680. * with given mac address
  681. *
  682. * @soc : data path soc handle
  683. * @ast_mac_addr : AST entry mac address
684. * @callback : callback function to be called on ast delete response from FW
  685. * @cookie : argument to be passed to callback
  686. *
  687. * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
  688. * is sent
689. * QDF_STATUS_E_INVAL if ast entry is not found
  690. */
  691. static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
  692. uint8_t *mac_addr,
  693. txrx_ast_free_cb callback,
  694. void *cookie)
  695. {
  696. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  697. struct dp_ast_entry *ast_entry;
  698. txrx_ast_free_cb cb = NULL;
  699. void *arg = NULL;
  700. qdf_spin_lock_bh(&soc->ast_lock);
  701. ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
  702. if (!ast_entry) {
  703. qdf_spin_unlock_bh(&soc->ast_lock);
  704. return -QDF_STATUS_E_INVAL;
  705. }
  706. if (ast_entry->callback) {
  707. cb = ast_entry->callback;
  708. arg = ast_entry->cookie;
  709. }
  710. ast_entry->callback = callback;
  711. ast_entry->cookie = cookie;
  712. /*
713. * If delete_in_progress is set, an AST delete has already been sent
714. * to the target and the host is waiting for the response; do not
715. * send the delete again.
  716. */
  717. if (!ast_entry->delete_in_progress)
  718. dp_peer_del_ast(soc, ast_entry);
  719. qdf_spin_unlock_bh(&soc->ast_lock);
  720. if (cb) {
  721. cb(soc->ctrl_psoc,
  722. soc,
  723. arg,
  724. CDP_TXRX_AST_DELETE_IN_PROGRESS);
  725. }
  726. return QDF_STATUS_SUCCESS;
  727. }
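/*
 * Illustrative sketch of the callback hand-off pattern used above (the
 * example_entry type and helper name are assumptions): any previously
 * registered free callback is captured while holding ast_lock and invoked
 * only after the lock is dropped, so the callback may safely re-acquire it.
 */
#if 0
struct example_entry {
	txrx_ast_free_cb callback;
	void *cookie;
};

static void example_replace_cb(struct dp_soc *soc, struct example_entry *e,
			       txrx_ast_free_cb new_cb, void *new_cookie)
{
	txrx_ast_free_cb old_cb = NULL;
	void *old_cookie = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	old_cb = e->callback;
	old_cookie = e->cookie;
	e->callback = new_cb;
	e->cookie = new_cookie;
	qdf_spin_unlock_bh(&soc->ast_lock);

	/* Notify the previous owner outside the lock */
	if (old_cb)
		old_cb(soc->ctrl_psoc, soc, old_cookie,
		       CDP_TXRX_AST_DELETE_IN_PROGRESS);
}
#endif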
  728. /**
  729. * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
  730. * table if mac address and pdev_id matches
  731. *
  732. * @soc : data path soc handle
  733. * @ast_mac_addr : AST entry mac address
  734. * @pdev_id : pdev id
735. * @callback : callback function to be called on ast delete response from FW
  736. * @cookie : argument to be passed to callback
  737. *
  738. * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
  739. * is sent
740. * QDF_STATUS_E_INVAL if ast entry is not found
  741. */
  742. static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
  743. uint8_t *mac_addr,
  744. uint8_t pdev_id,
  745. txrx_ast_free_cb callback,
  746. void *cookie)
  747. {
  748. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  749. struct dp_ast_entry *ast_entry;
  750. txrx_ast_free_cb cb = NULL;
  751. void *arg = NULL;
  752. qdf_spin_lock_bh(&soc->ast_lock);
  753. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
  754. if (!ast_entry) {
  755. qdf_spin_unlock_bh(&soc->ast_lock);
  756. return -QDF_STATUS_E_INVAL;
  757. }
  758. if (ast_entry->callback) {
  759. cb = ast_entry->callback;
  760. arg = ast_entry->cookie;
  761. }
  762. ast_entry->callback = callback;
  763. ast_entry->cookie = cookie;
  764. /*
765. * If delete_in_progress is set, an AST delete has already been sent
766. * to the target and the host is waiting for the response; do not
767. * send the delete again.
  768. */
  769. if (!ast_entry->delete_in_progress)
  770. dp_peer_del_ast(soc, ast_entry);
  771. qdf_spin_unlock_bh(&soc->ast_lock);
  772. if (cb) {
  773. cb(soc->ctrl_psoc,
  774. soc,
  775. arg,
  776. CDP_TXRX_AST_DELETE_IN_PROGRESS);
  777. }
  778. return QDF_STATUS_SUCCESS;
  779. }
  780. /**
781. * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
  782. * @ring_num: ring num of the ring being queried
  783. * @grp_mask: the grp_mask array for the ring type in question.
  784. *
  785. * The grp_mask array is indexed by group number and the bit fields correspond
  786. * to ring numbers. We are finding which interrupt group a ring belongs to.
  787. *
788. * Return: the index in the grp_mask array whose mask contains the ring number.
  789. * -QDF_STATUS_E_NOENT if no entry is found
  790. */
  791. static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
  792. {
  793. int ext_group_num;
  794. int mask = 1 << ring_num;
  795. for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
  796. ext_group_num++) {
  797. if (mask & grp_mask[ext_group_num])
  798. return ext_group_num;
  799. }
  800. return -QDF_STATUS_E_NOENT;
  801. }
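/*
 * Illustrative sketch of the group-mask lookup (the mask values are
 * hypothetical, not taken from any target configuration): each grp_mask
 * entry is a bitmap of ring numbers owned by that interrupt context, and
 * the first context whose bitmap has the ring's bit set is returned.
 */
#if 0
static void dp_srng_ring_in_mask_example(void)
{
	int grp_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { 0x3, 0xC };
	int group = dp_srng_find_ring_in_mask(2, grp_mask);

	/* group == 1 here: bit 2 is set only in grp_mask[1].
	 * -QDF_STATUS_E_NOENT is returned if no context owns the ring.
	 */
	(void)group;
}
#endif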
  802. static int dp_srng_calculate_msi_group(struct dp_soc *soc,
  803. enum hal_ring_type ring_type,
  804. int ring_num)
  805. {
  806. int *grp_mask;
  807. switch (ring_type) {
  808. case WBM2SW_RELEASE:
  809. /* dp_tx_comp_handler - soc->tx_comp_ring */
  810. if (ring_num < 3)
  811. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  812. /* dp_rx_wbm_err_process - soc->rx_rel_ring */
  813. else if (ring_num == 3) {
  814. /* sw treats this as a separate ring type */
  815. grp_mask = &soc->wlan_cfg_ctx->
  816. int_rx_wbm_rel_ring_mask[0];
  817. ring_num = 0;
  818. } else {
  819. qdf_assert(0);
  820. return -QDF_STATUS_E_NOENT;
  821. }
  822. break;
  823. case REO_EXCEPTION:
  824. /* dp_rx_err_process - &soc->reo_exception_ring */
  825. grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
  826. break;
  827. case REO_DST:
  828. /* dp_rx_process - soc->reo_dest_ring */
  829. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  830. break;
  831. case REO_STATUS:
  832. /* dp_reo_status_ring_handler - soc->reo_status_ring */
  833. grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
  834. break;
  835. /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
  836. case RXDMA_MONITOR_STATUS:
  837. /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
  838. case RXDMA_MONITOR_DST:
  839. /* dp_mon_process */
  840. grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
  841. break;
  842. case RXDMA_DST:
  843. /* dp_rxdma_err_process */
  844. grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
  845. break;
  846. case RXDMA_BUF:
  847. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  848. break;
  849. case RXDMA_MONITOR_BUF:
  850. /* TODO: support low_thresh interrupt */
  851. return -QDF_STATUS_E_NOENT;
  852. break;
  853. case TCL_DATA:
  854. case TCL_CMD:
  855. case REO_CMD:
  856. case SW2WBM_RELEASE:
  857. case WBM_IDLE_LINK:
  858. /* normally empty SW_TO_HW rings */
  859. return -QDF_STATUS_E_NOENT;
  860. break;
  861. case TCL_STATUS:
  862. case REO_REINJECT:
  863. /* misc unused rings */
  864. return -QDF_STATUS_E_NOENT;
  865. break;
  866. case CE_SRC:
  867. case CE_DST:
  868. case CE_DST_STATUS:
  869. /* CE_rings - currently handled by hif */
  870. default:
  871. return -QDF_STATUS_E_NOENT;
  872. break;
  873. }
  874. return dp_srng_find_ring_in_mask(ring_num, grp_mask);
  875. }
  876. static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
  877. *ring_params, int ring_type, int ring_num)
  878. {
  879. int msi_group_number;
  880. int msi_data_count;
  881. int ret;
  882. uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
  883. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  884. &msi_data_count, &msi_data_start,
  885. &msi_irq_start);
  886. if (ret)
  887. return;
  888. msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
  889. ring_num);
  890. if (msi_group_number < 0) {
  891. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  892. FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
  893. ring_type, ring_num);
  894. ring_params->msi_addr = 0;
  895. ring_params->msi_data = 0;
  896. return;
  897. }
  898. if (msi_group_number > msi_data_count) {
  899. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  900. FL("2 msi_groups will share an msi; msi_group_num %d"),
  901. msi_group_number);
  902. QDF_ASSERT(0);
  903. }
  904. pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
  905. ring_params->msi_addr = addr_low;
  906. ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
  907. ring_params->msi_data = (msi_group_number % msi_data_count)
  908. + msi_data_start;
  909. ring_params->flags |= HAL_SRNG_MSI_INTR;
  910. }
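/*
 * Worked example of the MSI programming above (the MSI assignment values
 * are hypothetical): with a 3-vector user MSI range starting at data value
 * 40, interrupt group 4 wraps to msi_data = (4 % 3) + 40 = 41, and the
 * 64-bit MSI address is assembled from the low/high words reported by
 * pld_get_msi_address().
 */
#if 0
static void dp_srng_msi_example(void)
{
	uint32_t msi_data_start = 40, msi_data_count = 3;
	int msi_group_number = 4;
	uint32_t addr_low = 0xfee00000, addr_high = 0x0;
	qdf_dma_addr_t msi_addr;
	uint32_t msi_data;

	msi_addr = addr_low;
	msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	msi_data = (msi_group_number % msi_data_count) + msi_data_start;

	/* msi_data == 41 and msi_addr == 0xfee00000 in this example */
	(void)msi_addr;
	(void)msi_data;
}
#endif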
  911. /**
  912. * dp_print_ast_stats() - Dump AST table contents
  913. * @soc: Datapath soc handle
  914. *
  915. * return void
  916. */
  917. #ifdef FEATURE_AST
  918. void dp_print_ast_stats(struct dp_soc *soc)
  919. {
  920. uint8_t i;
  921. uint8_t num_entries = 0;
  922. struct dp_vdev *vdev;
  923. struct dp_pdev *pdev;
  924. struct dp_peer *peer;
  925. struct dp_ast_entry *ase, *tmp_ase;
  926. char type[CDP_TXRX_AST_TYPE_MAX][10] = {
  927. "NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
  928. "DA", "HMWDS_SEC"};
  929. DP_PRINT_STATS("AST Stats:");
  930. DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
  931. DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
  932. DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
  933. DP_PRINT_STATS("AST Table:");
  934. qdf_spin_lock_bh(&soc->ast_lock);
  935. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  936. pdev = soc->pdev_list[i];
  937. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  938. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  939. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  940. DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
  941. DP_PRINT_STATS("%6d mac_addr = %pM"
  942. " peer_mac_addr = %pM"
  943. " peer_id = %u"
  944. " type = %s"
  945. " next_hop = %d"
  946. " is_active = %d"
  947. " is_bss = %d"
  948. " ast_idx = %d"
  949. " ast_hash = %d"
  950. " delete_in_progress = %d"
  951. " pdev_id = %d"
  952. " vdev_id = %d",
  953. ++num_entries,
  954. ase->mac_addr.raw,
  955. ase->peer->mac_addr.raw,
  956. ase->peer->peer_ids[0],
  957. type[ase->type],
  958. ase->next_hop,
  959. ase->is_active,
  960. ase->is_bss,
  961. ase->ast_idx,
  962. ase->ast_hash_value,
  963. ase->delete_in_progress,
  964. ase->pdev_id,
  965. ase->vdev_id);
  966. }
  967. }
  968. }
  969. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  970. }
  971. qdf_spin_unlock_bh(&soc->ast_lock);
  972. }
  973. #else
  974. void dp_print_ast_stats(struct dp_soc *soc)
  975. {
  976. DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
  977. return;
  978. }
  979. #endif
  980. /**
  981. * dp_print_peer_table() - Dump all Peer stats
  982. * @vdev: Datapath Vdev handle
  983. *
  984. * return void
  985. */
  986. static void dp_print_peer_table(struct dp_vdev *vdev)
  987. {
  988. struct dp_peer *peer = NULL;
  989. DP_PRINT_STATS("Dumping Peer Table Stats:");
  990. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  991. if (!peer) {
  992. DP_PRINT_STATS("Invalid Peer");
  993. return;
  994. }
  995. DP_PRINT_STATS(" peer_mac_addr = %pM"
  996. " nawds_enabled = %d"
  997. " bss_peer = %d"
  998. " wapi = %d"
  999. " wds_enabled = %d"
  1000. " delete in progress = %d"
  1001. " peer id = %d",
  1002. peer->mac_addr.raw,
  1003. peer->nawds_enabled,
  1004. peer->bss_peer,
  1005. peer->wapi,
  1006. peer->wds_enabled,
  1007. peer->delete_in_progress,
  1008. peer->peer_ids[0]);
  1009. }
  1010. }
  1011. /*
1012. * dp_srng_setup() - Internal function to set up SRNG rings used by the data path
  1013. */
  1014. static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
  1015. int ring_type, int ring_num, int mac_id, uint32_t num_entries)
  1016. {
  1017. void *hal_soc = soc->hal_soc;
  1018. uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
  1019. /* TODO: See if we should get align size from hal */
  1020. uint32_t ring_base_align = 8;
  1021. struct hal_srng_params ring_params;
  1022. uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
  1023. /* TODO: Currently hal layer takes care of endianness related settings.
  1024. * See if these settings need to passed from DP layer
  1025. */
  1026. ring_params.flags = 0;
  1027. num_entries = (num_entries > max_entries) ? max_entries : num_entries;
  1028. srng->hal_srng = NULL;
  1029. srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
  1030. srng->num_entries = num_entries;
  1031. if (!soc->dp_soc_reinit) {
  1032. srng->base_vaddr_unaligned =
  1033. qdf_mem_alloc_consistent(soc->osdev,
  1034. soc->osdev->dev,
  1035. srng->alloc_size,
  1036. &srng->base_paddr_unaligned);
  1037. }
  1038. if (!srng->base_vaddr_unaligned) {
  1039. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1040. FL("alloc failed - ring_type: %d, ring_num %d"),
  1041. ring_type, ring_num);
  1042. return QDF_STATUS_E_NOMEM;
  1043. }
  1044. ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
  1045. ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
  1046. ring_params.ring_base_paddr = srng->base_paddr_unaligned +
  1047. ((unsigned long)(ring_params.ring_base_vaddr) -
  1048. (unsigned long)srng->base_vaddr_unaligned);
  1049. ring_params.num_entries = num_entries;
  1050. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  1051. FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
  1052. ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
  1053. (void *)ring_params.ring_base_paddr, ring_params.num_entries);
  1054. if (soc->intr_mode == DP_INTR_MSI) {
  1055. dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
  1056. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1057. FL("Using MSI for ring_type: %d, ring_num %d"),
  1058. ring_type, ring_num);
  1059. } else {
  1060. ring_params.msi_data = 0;
  1061. ring_params.msi_addr = 0;
  1062. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1063. FL("Skipping MSI for ring_type: %d, ring_num %d"),
  1064. ring_type, ring_num);
  1065. }
  1066. /*
  1067. * Setup interrupt timer and batch counter thresholds for
  1068. * interrupt mitigation based on ring type
  1069. */
  1070. if (ring_type == REO_DST) {
  1071. ring_params.intr_timer_thres_us =
  1072. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  1073. ring_params.intr_batch_cntr_thres_entries =
  1074. wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
  1075. } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
  1076. ring_params.intr_timer_thres_us =
  1077. wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
  1078. ring_params.intr_batch_cntr_thres_entries =
  1079. wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
  1080. } else {
  1081. ring_params.intr_timer_thres_us =
  1082. wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
  1083. ring_params.intr_batch_cntr_thres_entries =
  1084. wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
  1085. }
  1086. /* Enable low threshold interrupts for rx buffer rings (regular and
1087. * monitor buffer rings).
  1088. * TODO: See if this is required for any other ring
  1089. */
  1090. if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
  1091. (ring_type == RXDMA_MONITOR_STATUS)) {
  1092. /* TODO: Setting low threshold to 1/8th of ring size
  1093. * see if this needs to be configurable
  1094. */
  1095. ring_params.low_threshold = num_entries >> 3;
  1096. ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
  1097. ring_params.intr_timer_thres_us =
  1098. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  1099. ring_params.intr_batch_cntr_thres_entries = 0;
  1100. }
  1101. srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
  1102. mac_id, &ring_params);
  1103. if (!srng->hal_srng) {
  1104. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1105. srng->alloc_size,
  1106. srng->base_vaddr_unaligned,
  1107. srng->base_paddr_unaligned, 0);
  1108. }
  1109. return 0;
  1110. }
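/*
 * Illustrative caller sketch (the ring type, number and size here are
 * hypothetical): a ring is described by a dp_srng instance and set up with
 * its HAL ring type, ring number, mac_id and requested depth; the requested
 * depth is clamped to hal_srng_max_entries() inside dp_srng_setup().
 */
#if 0
static int dp_srng_setup_example(struct dp_soc *soc)
{
	/* e.g. REO destination ring 0 with 1024 entries and no mac_id */
	return dp_srng_setup(soc, &soc->reo_dest_ring[0], REO_DST, 0, 0, 1024);
}
#endif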
  1111. /*
  1112. * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
  1113. * @soc: DP SOC handle
  1114. * @srng: source ring structure
  1115. * @ring_type: type of ring
  1116. * @ring_num: ring number
  1117. *
  1118. * Return: None
  1119. */
  1120. static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
  1121. int ring_type, int ring_num)
  1122. {
  1123. if (!srng->hal_srng) {
  1124. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1125. FL("Ring type: %d, num:%d not setup"),
  1126. ring_type, ring_num);
  1127. return;
  1128. }
  1129. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  1130. srng->hal_srng = NULL;
  1131. }
  1132. /**
  1133. * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
  1134. * Any buffers allocated and attached to ring entries are expected to be freed
  1135. * before calling this function.
  1136. */
  1137. static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
  1138. int ring_type, int ring_num)
  1139. {
  1140. if (!soc->dp_soc_reinit) {
  1141. if (!srng->hal_srng && (srng->alloc_size == 0)) {
  1142. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1143. FL("Ring type: %d, num:%d not setup"),
  1144. ring_type, ring_num);
  1145. return;
  1146. }
  1147. if (srng->hal_srng) {
  1148. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  1149. srng->hal_srng = NULL;
  1150. }
  1151. }
  1152. if (srng->alloc_size) {
  1153. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1154. srng->alloc_size,
  1155. srng->base_vaddr_unaligned,
  1156. srng->base_paddr_unaligned, 0);
  1157. srng->alloc_size = 0;
  1158. }
  1159. }
  1160. /* TODO: Need this interface from HIF */
  1161. void *hif_get_hal_handle(void *hif_handle);
  1162. /*
  1163. * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1164. * @dp_ctx: DP interrupt context handle
1165. * @dp_budget: Number of frames/descriptors that can be processed in one shot
  1166. *
  1167. * Return: remaining budget/quota for the soc device
  1168. */
  1169. static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
  1170. {
  1171. struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
  1172. struct dp_soc *soc = int_ctx->soc;
  1173. int ring = 0;
  1174. uint32_t work_done = 0;
  1175. int budget = dp_budget;
  1176. uint8_t tx_mask = int_ctx->tx_ring_mask;
  1177. uint8_t rx_mask = int_ctx->rx_ring_mask;
  1178. uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
  1179. uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
  1180. uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
  1181. uint32_t remaining_quota = dp_budget;
  1182. struct dp_pdev *pdev = NULL;
  1183. int mac_id;
  1184. /* Process Tx completion interrupts first to return back buffers */
  1185. while (tx_mask) {
  1186. if (tx_mask & 0x1) {
  1187. work_done = dp_tx_comp_handler(soc,
  1188. soc->tx_comp_ring[ring].hal_srng,
  1189. remaining_quota);
  1190. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1191. "tx mask 0x%x ring %d, budget %d, work_done %d",
  1192. tx_mask, ring, budget, work_done);
  1193. budget -= work_done;
  1194. if (budget <= 0)
  1195. goto budget_done;
  1196. remaining_quota = budget;
  1197. }
  1198. tx_mask = tx_mask >> 1;
  1199. ring++;
  1200. }
  1201. /* Process REO Exception ring interrupt */
  1202. if (rx_err_mask) {
  1203. work_done = dp_rx_err_process(soc,
  1204. soc->reo_exception_ring.hal_srng,
  1205. remaining_quota);
  1206. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1207. "REO Exception Ring: work_done %d budget %d",
  1208. work_done, budget);
  1209. budget -= work_done;
  1210. if (budget <= 0) {
  1211. goto budget_done;
  1212. }
  1213. remaining_quota = budget;
  1214. }
  1215. /* Process Rx WBM release ring interrupt */
  1216. if (rx_wbm_rel_mask) {
  1217. work_done = dp_rx_wbm_err_process(soc,
  1218. soc->rx_rel_ring.hal_srng, remaining_quota);
  1219. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1220. "WBM Release Ring: work_done %d budget %d",
  1221. work_done, budget);
  1222. budget -= work_done;
  1223. if (budget <= 0) {
  1224. goto budget_done;
  1225. }
  1226. remaining_quota = budget;
  1227. }
  1228. /* Process Rx interrupts */
  1229. if (rx_mask) {
  1230. for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
  1231. if (rx_mask & (1 << ring)) {
  1232. work_done = dp_rx_process(int_ctx,
  1233. soc->reo_dest_ring[ring].hal_srng,
  1234. ring,
  1235. remaining_quota);
  1236. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1237. "rx mask 0x%x ring %d, work_done %d budget %d",
  1238. rx_mask, ring, work_done, budget);
  1239. budget -= work_done;
  1240. if (budget <= 0)
  1241. goto budget_done;
  1242. remaining_quota = budget;
  1243. }
  1244. }
  1245. }
  1246. if (reo_status_mask)
  1247. dp_reo_status_ring_handler(soc);
  1248. /* Process LMAC interrupts */
  1249. for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
  1250. pdev = soc->pdev_list[ring];
  1251. if (pdev == NULL)
  1252. continue;
  1253. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  1254. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  1255. pdev->pdev_id);
  1256. if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
  1257. work_done = dp_mon_process(soc, mac_for_pdev,
  1258. remaining_quota);
  1259. budget -= work_done;
  1260. if (budget <= 0)
  1261. goto budget_done;
  1262. remaining_quota = budget;
  1263. }
  1264. if (int_ctx->rxdma2host_ring_mask &
  1265. (1 << mac_for_pdev)) {
  1266. work_done = dp_rxdma_err_process(soc,
  1267. mac_for_pdev,
  1268. remaining_quota);
  1269. budget -= work_done;
  1270. if (budget <= 0)
  1271. goto budget_done;
  1272. remaining_quota = budget;
  1273. }
  1274. if (int_ctx->host2rxdma_ring_mask &
  1275. (1 << mac_for_pdev)) {
  1276. union dp_rx_desc_list_elem_t *desc_list = NULL;
  1277. union dp_rx_desc_list_elem_t *tail = NULL;
  1278. struct dp_srng *rx_refill_buf_ring =
  1279. &pdev->rx_refill_buf_ring;
  1280. DP_STATS_INC(pdev, replenish.low_thresh_intrs,
  1281. 1);
  1282. dp_rx_buffers_replenish(soc, mac_for_pdev,
  1283. rx_refill_buf_ring,
  1284. &soc->rx_desc_buf[mac_for_pdev], 0,
  1285. &desc_list, &tail);
  1286. }
  1287. }
  1288. }
  1289. qdf_lro_flush(int_ctx->lro_ctx);
  1290. budget_done:
  1291. return dp_budget - budget;
  1292. }
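/*
 * Illustrative sketch of the budget accounting used above (the work_done
 * numbers are hypothetical): each handler reports how much work it did,
 * the budget is decremented, and the remaining quota is what the next
 * handler may use. With dp_budget = 64, Tx completion consuming 20 and
 * REO exception consuming 10, dp_service_srngs() reports 30 as work done
 * if nothing else used quota.
 */
#if 0
static uint32_t dp_budget_example(uint32_t dp_budget)
{
	int budget = dp_budget;
	uint32_t work_done;

	work_done = 20;			/* e.g. Tx completions reaped */
	budget -= work_done;
	work_done = 10;			/* e.g. REO exception entries */
	budget -= work_done;

	return dp_budget - budget;	/* total work done: 30 */
}
#endif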
  1293. /* dp_interrupt_timer()- timer poll for interrupts
  1294. *
  1295. * @arg: SoC Handle
  1296. *
1297. * Return: none
  1298. *
  1299. */
  1300. static void dp_interrupt_timer(void *arg)
  1301. {
  1302. struct dp_soc *soc = (struct dp_soc *) arg;
  1303. int i;
  1304. if (qdf_atomic_read(&soc->cmn_init_done)) {
  1305. for (i = 0;
  1306. i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
  1307. dp_service_srngs(&soc->intr_ctx[i], 0xffff);
  1308. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  1309. }
  1310. }
  1311. /*
  1312. * dp_soc_attach_poll() - Register handlers for DP interrupts
  1313. * @txrx_soc: DP SOC handle
  1314. *
1315. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1316. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
1317. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  1318. *
  1319. * Return: 0 for success, nonzero for failure.
  1320. */
  1321. static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
  1322. {
  1323. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1324. int i;
  1325. soc->intr_mode = DP_INTR_POLL;
  1326. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1327. soc->intr_ctx[i].dp_intr_id = i;
  1328. soc->intr_ctx[i].tx_ring_mask =
  1329. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1330. soc->intr_ctx[i].rx_ring_mask =
  1331. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1332. soc->intr_ctx[i].rx_mon_ring_mask =
  1333. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
  1334. soc->intr_ctx[i].rx_err_ring_mask =
  1335. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1336. soc->intr_ctx[i].rx_wbm_rel_ring_mask =
  1337. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1338. soc->intr_ctx[i].reo_status_ring_mask =
  1339. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1340. soc->intr_ctx[i].rxdma2host_ring_mask =
  1341. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1342. soc->intr_ctx[i].soc = soc;
  1343. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1344. }
  1345. qdf_timer_init(soc->osdev, &soc->int_timer,
  1346. dp_interrupt_timer, (void *)soc,
  1347. QDF_TIMER_TYPE_WAKE_APPS);
  1348. return QDF_STATUS_SUCCESS;
  1349. }
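/*
 * Illustrative sketch (the point at which the poll timer is first started
 * is an assumption; in this driver it is kicked off later during common
 * init, not here): once armed, dp_interrupt_timer() services every
 * interrupt context with a large budget and re-arms itself every
 * DP_INTR_POLL_TIMER_MS.
 */
#if 0
static void dp_poll_mode_example(struct dp_soc *soc)
{
	qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
}
#endif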
  1350. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
  1351. #if defined(CONFIG_MCL)
  1352. /*
  1353. * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
  1354. * @txrx_soc: DP SOC handle
  1355. *
  1356. * Call the appropriate attach function based on the mode of operation.
  1357. * This is a WAR for enabling monitor mode.
  1358. *
  1359. * Return: 0 for success. nonzero for failure.
  1360. */
  1361. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1362. {
  1363. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1364. if (!(soc->wlan_cfg_ctx->napi_enabled) ||
  1365. con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
  1366. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1367. "%s: Poll mode", __func__);
  1368. return dp_soc_attach_poll(txrx_soc);
  1369. } else {
  1370. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1371. "%s: Interrupt mode", __func__);
  1372. return dp_soc_interrupt_attach(txrx_soc);
  1373. }
  1374. }
  1375. #else
  1376. #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
  1377. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1378. {
  1379. return dp_soc_attach_poll(txrx_soc);
  1380. }
  1381. #else
  1382. static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
  1383. {
  1384. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1385. if (hif_is_polled_mode_enabled(soc->hif_handle))
  1386. return dp_soc_attach_poll(txrx_soc);
  1387. else
  1388. return dp_soc_interrupt_attach(txrx_soc);
  1389. }
  1390. #endif
  1391. #endif
  1392. static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
  1393. int intr_ctx_num, int *irq_id_map, int *num_irq_r)
  1394. {
  1395. int j;
  1396. int num_irq = 0;
  1397. int tx_mask =
  1398. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1399. int rx_mask =
  1400. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1401. int rx_mon_mask =
  1402. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1403. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1404. soc->wlan_cfg_ctx, intr_ctx_num);
  1405. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1406. soc->wlan_cfg_ctx, intr_ctx_num);
  1407. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1408. soc->wlan_cfg_ctx, intr_ctx_num);
  1409. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1410. soc->wlan_cfg_ctx, intr_ctx_num);
  1411. int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
  1412. soc->wlan_cfg_ctx, intr_ctx_num);
  1413. int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
  1414. soc->wlan_cfg_ctx, intr_ctx_num);
  1415. for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
  1416. if (tx_mask & (1 << j)) {
  1417. irq_id_map[num_irq++] =
  1418. (wbm2host_tx_completions_ring1 - j);
  1419. }
  1420. if (rx_mask & (1 << j)) {
  1421. irq_id_map[num_irq++] =
  1422. (reo2host_destination_ring1 - j);
  1423. }
  1424. if (rxdma2host_ring_mask & (1 << j)) {
  1425. irq_id_map[num_irq++] =
  1426. rxdma2host_destination_ring_mac1 -
  1427. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1428. }
  1429. if (host2rxdma_ring_mask & (1 << j)) {
  1430. irq_id_map[num_irq++] =
  1431. host2rxdma_host_buf_ring_mac1 -
  1432. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1433. }
  1434. if (host2rxdma_mon_ring_mask & (1 << j)) {
  1435. irq_id_map[num_irq++] =
  1436. host2rxdma_monitor_ring1 -
  1437. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1438. }
  1439. if (rx_mon_mask & (1 << j)) {
  1440. irq_id_map[num_irq++] =
  1441. ppdu_end_interrupts_mac1 -
  1442. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1443. irq_id_map[num_irq++] =
  1444. rxdma2host_monitor_status_ring_mac1 -
  1445. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1446. }
  1447. if (rx_wbm_rel_ring_mask & (1 << j))
  1448. irq_id_map[num_irq++] = wbm2host_rx_release;
  1449. if (rx_err_ring_mask & (1 << j))
  1450. irq_id_map[num_irq++] = reo2host_exception;
  1451. if (reo_status_ring_mask & (1 << j))
  1452. irq_id_map[num_irq++] = reo2host_status;
  1453. }
  1454. *num_irq_r = num_irq;
  1455. }
  1456. static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
  1457. int intr_ctx_num, int *irq_id_map, int *num_irq_r,
  1458. int msi_vector_count, int msi_vector_start)
  1459. {
  1460. int tx_mask = wlan_cfg_get_tx_ring_mask(
  1461. soc->wlan_cfg_ctx, intr_ctx_num);
  1462. int rx_mask = wlan_cfg_get_rx_ring_mask(
  1463. soc->wlan_cfg_ctx, intr_ctx_num);
  1464. int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
  1465. soc->wlan_cfg_ctx, intr_ctx_num);
  1466. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1467. soc->wlan_cfg_ctx, intr_ctx_num);
  1468. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1469. soc->wlan_cfg_ctx, intr_ctx_num);
  1470. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1471. soc->wlan_cfg_ctx, intr_ctx_num);
  1472. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1473. soc->wlan_cfg_ctx, intr_ctx_num);
  1474. unsigned int vector =
  1475. (intr_ctx_num % msi_vector_count) + msi_vector_start;
  1476. int num_irq = 0;
  1477. soc->intr_mode = DP_INTR_MSI;
  1478. if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
  1479. rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
  1480. irq_id_map[num_irq++] =
  1481. pld_get_msi_irq(soc->osdev->dev, vector);
  1482. *num_irq_r = num_irq;
  1483. }
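/*
 * Worked example of the vector selection above (the vector count and start
 * values are hypothetical): with msi_vector_count = 3 and
 * msi_vector_start = 1, interrupt contexts 0..5 are spread over MSI
 * vectors 1, 2, 3, 1, 2, 3 - several contexts legitimately share one
 * vector when fewer MSIs are available than contexts.
 */
#if 0
static unsigned int dp_msi_vector_example(int intr_ctx_num)
{
	int msi_vector_count = 3;
	int msi_vector_start = 1;

	return (intr_ctx_num % msi_vector_count) + msi_vector_start;
}
#endif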
  1484. static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
  1485. int *irq_id_map, int *num_irq)
  1486. {
  1487. int msi_vector_count, ret;
  1488. uint32_t msi_base_data, msi_vector_start;
  1489. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  1490. &msi_vector_count,
  1491. &msi_base_data,
  1492. &msi_vector_start);
  1493. if (ret)
  1494. return dp_soc_interrupt_map_calculate_integrated(soc,
  1495. intr_ctx_num, irq_id_map, num_irq);
  1496. else
  1497. dp_soc_interrupt_map_calculate_msi(soc,
  1498. intr_ctx_num, irq_id_map, num_irq,
  1499. msi_vector_count, msi_vector_start);
  1500. }
  1501. /*
  1502. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  1503. * @txrx_soc: DP SOC handle
  1504. *
1505. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1506. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
1507. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  1508. *
  1509. * Return: 0 for success. nonzero for failure.
  1510. */
  1511. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  1512. {
  1513. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1514. int i = 0;
  1515. int num_irq = 0;
  1516. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1517. int ret = 0;
  1518. /* Map of IRQ ids registered with one interrupt context */
  1519. int irq_id_map[HIF_MAX_GRP_IRQ];
  1520. int tx_mask =
  1521. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1522. int rx_mask =
  1523. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1524. int rx_mon_mask =
  1525. dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
  1526. int rx_err_ring_mask =
  1527. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1528. int rx_wbm_rel_ring_mask =
  1529. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1530. int reo_status_ring_mask =
  1531. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1532. int rxdma2host_ring_mask =
  1533. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1534. int host2rxdma_ring_mask =
  1535. wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
  1536. int host2rxdma_mon_ring_mask =
  1537. wlan_cfg_get_host2rxdma_mon_ring_mask(
  1538. soc->wlan_cfg_ctx, i);
  1539. soc->intr_ctx[i].dp_intr_id = i;
  1540. soc->intr_ctx[i].tx_ring_mask = tx_mask;
  1541. soc->intr_ctx[i].rx_ring_mask = rx_mask;
  1542. soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
  1543. soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
  1544. soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
  1545. soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
  1546. soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
  1547. soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
  1548. soc->intr_ctx[i].host2rxdma_mon_ring_mask =
  1549. host2rxdma_mon_ring_mask;
  1550. soc->intr_ctx[i].soc = soc;
  1551. num_irq = 0;
  1552. dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
  1553. &num_irq);
  1554. ret = hif_register_ext_group(soc->hif_handle,
  1555. num_irq, irq_id_map, dp_service_srngs,
  1556. &soc->intr_ctx[i], "dp_intr",
  1557. HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
  1558. if (ret) {
  1559. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1560. FL("failed, ret = %d"), ret);
  1561. return QDF_STATUS_E_FAILURE;
  1562. }
  1563. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1564. }
  1565. hif_configure_ext_group_interrupts(soc->hif_handle);
  1566. return QDF_STATUS_SUCCESS;
  1567. }
  1568. /*
  1569. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  1570. * @txrx_soc: DP SOC handle
  1571. *
  1572. * Return: void
  1573. */
  1574. static void dp_soc_interrupt_detach(void *txrx_soc)
  1575. {
  1576. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1577. int i;
  1578. if (soc->intr_mode == DP_INTR_POLL) {
  1579. qdf_timer_stop(&soc->int_timer);
  1580. qdf_timer_free(&soc->int_timer);
  1581. } else {
  1582. hif_deregister_exec_group(soc->hif_handle, "dp_intr");
  1583. }
  1584. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1585. soc->intr_ctx[i].tx_ring_mask = 0;
  1586. soc->intr_ctx[i].rx_ring_mask = 0;
  1587. soc->intr_ctx[i].rx_mon_ring_mask = 0;
  1588. soc->intr_ctx[i].rx_err_ring_mask = 0;
  1589. soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
  1590. soc->intr_ctx[i].reo_status_ring_mask = 0;
  1591. soc->intr_ctx[i].rxdma2host_ring_mask = 0;
  1592. soc->intr_ctx[i].host2rxdma_ring_mask = 0;
  1593. soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
  1594. qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
  1595. }
  1596. }
  1597. #define AVG_MAX_MPDUS_PER_TID 128
  1598. #define AVG_TIDS_PER_CLIENT 2
  1599. #define AVG_FLOWS_PER_TID 2
  1600. #define AVG_MSDUS_PER_FLOW 128
  1601. #define AVG_MSDUS_PER_MPDU 4
  1602. /*
  1603. * Allocate and setup link descriptor pool that will be used by HW for
  1604. * various link and queue descriptors and managed by WBM
  1605. */
  1606. static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
  1607. {
  1608. int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
  1609. int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
  1610. uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
  1611. uint32_t num_mpdus_per_link_desc =
  1612. hal_num_mpdus_per_link_desc(soc->hal_soc);
  1613. uint32_t num_msdus_per_link_desc =
  1614. hal_num_msdus_per_link_desc(soc->hal_soc);
  1615. uint32_t num_mpdu_links_per_queue_desc =
  1616. hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
  1617. uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
  1618. uint32_t total_link_descs, total_mem_size;
  1619. uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
  1620. uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
  1621. uint32_t num_link_desc_banks;
  1622. uint32_t last_bank_size = 0;
  1623. uint32_t entry_size, num_entries;
  1624. int i;
  1625. uint32_t desc_id = 0;
  1626. qdf_dma_addr_t *baseaddr = NULL;
1627. /* Only Tx queue descriptors are allocated from the common link descriptor
1628. * pool. Rx queue descriptors (REO queue extension descriptors) are not
1629. * included here because they are expected to be allocated contiguously
1630. * with the REO queue descriptors.
  1631. */
  1632. num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1633. AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
  1634. num_mpdu_queue_descs = num_mpdu_link_descs /
  1635. num_mpdu_links_per_queue_desc;
  1636. num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1637. AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
  1638. num_msdus_per_link_desc;
  1639. num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1640. AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
  1641. num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
  1642. num_tx_msdu_link_descs + num_rx_msdu_link_descs;
  1643. /* Round up to power of 2 */
  1644. total_link_descs = 1;
  1645. while (total_link_descs < num_entries)
  1646. total_link_descs <<= 1;
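	/*
	 * Worked example (max_clients and the HAL-dependent divisor are
	 * hypothetical, for illustration only): with max_clients = 64 and
	 * num_mpdus_per_link_desc = 6, num_mpdu_link_descs =
	 * (64 * 2 * 128) / 6 = 2730.  The queue and MSDU link descriptor
	 * estimates are added on top, and the loop above rounds
	 * total_link_descs up to the next power of two covering num_entries.
	 */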
  1647. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1648. FL("total_link_descs: %u, link_desc_size: %d"),
  1649. total_link_descs, link_desc_size);
  1650. total_mem_size = total_link_descs * link_desc_size;
  1651. total_mem_size += link_desc_align;
  1652. if (total_mem_size <= max_alloc_size) {
  1653. num_link_desc_banks = 0;
  1654. last_bank_size = total_mem_size;
  1655. } else {
  1656. num_link_desc_banks = (total_mem_size) /
  1657. (max_alloc_size - link_desc_align);
  1658. last_bank_size = total_mem_size %
  1659. (max_alloc_size - link_desc_align);
  1660. }
  1661. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1662. FL("total_mem_size: %d, num_link_desc_banks: %u"),
  1663. total_mem_size, num_link_desc_banks);
  1664. for (i = 0; i < num_link_desc_banks; i++) {
  1665. if (!soc->dp_soc_reinit) {
  1666. baseaddr = &soc->link_desc_banks[i].
  1667. base_paddr_unaligned;
  1668. soc->link_desc_banks[i].base_vaddr_unaligned =
  1669. qdf_mem_alloc_consistent(soc->osdev,
  1670. soc->osdev->dev,
  1671. max_alloc_size,
  1672. baseaddr);
  1673. }
  1674. soc->link_desc_banks[i].size = max_alloc_size;
  1675. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
  1676. soc->link_desc_banks[i].base_vaddr_unaligned) +
  1677. ((unsigned long)(
  1678. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1679. link_desc_align));
  1680. soc->link_desc_banks[i].base_paddr = (unsigned long)(
  1681. soc->link_desc_banks[i].base_paddr_unaligned) +
  1682. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1683. (unsigned long)(
  1684. soc->link_desc_banks[i].base_vaddr_unaligned));
  1685. if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
  1686. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1687. FL("Link descriptor memory alloc failed"));
  1688. goto fail;
  1689. }
  1690. }
  1691. if (last_bank_size) {
1692. /* Allocate the last bank in case the total memory required is not an
1693. * exact multiple of max_alloc_size
  1694. */
  1695. if (!soc->dp_soc_reinit) {
  1696. baseaddr = &soc->link_desc_banks[i].
  1697. base_paddr_unaligned;
  1698. soc->link_desc_banks[i].base_vaddr_unaligned =
  1699. qdf_mem_alloc_consistent(soc->osdev,
  1700. soc->osdev->dev,
  1701. last_bank_size,
  1702. baseaddr);
  1703. }
  1704. soc->link_desc_banks[i].size = last_bank_size;
  1705. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
  1706. (soc->link_desc_banks[i].base_vaddr_unaligned) +
  1707. ((unsigned long)(
  1708. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1709. link_desc_align));
  1710. soc->link_desc_banks[i].base_paddr =
  1711. (unsigned long)(
  1712. soc->link_desc_banks[i].base_paddr_unaligned) +
  1713. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1714. (unsigned long)(
  1715. soc->link_desc_banks[i].base_vaddr_unaligned));
  1716. }
  1717. /* Allocate and setup link descriptor idle list for HW internal use */
  1718. entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
  1719. total_mem_size = entry_size * total_link_descs;
  1720. if (total_mem_size <= max_alloc_size) {
  1721. void *desc;
  1722. if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
  1723. WBM_IDLE_LINK, 0, 0, total_link_descs)) {
  1724. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1725. FL("Link desc idle ring setup failed"));
  1726. goto fail;
  1727. }
  1728. hal_srng_access_start_unlocked(soc->hal_soc,
  1729. soc->wbm_idle_link_ring.hal_srng);
  1730. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1731. soc->link_desc_banks[i].base_paddr; i++) {
  1732. uint32_t num_entries = (soc->link_desc_banks[i].size -
  1733. ((unsigned long)(
  1734. soc->link_desc_banks[i].base_vaddr) -
  1735. (unsigned long)(
  1736. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1737. / link_desc_size;
  1738. unsigned long paddr = (unsigned long)(
  1739. soc->link_desc_banks[i].base_paddr);
  1740. while (num_entries && (desc = hal_srng_src_get_next(
  1741. soc->hal_soc,
  1742. soc->wbm_idle_link_ring.hal_srng))) {
  1743. hal_set_link_desc_addr(desc,
  1744. LINK_DESC_COOKIE(desc_id, i), paddr);
  1745. num_entries--;
  1746. desc_id++;
  1747. paddr += link_desc_size;
  1748. }
  1749. }
  1750. hal_srng_access_end_unlocked(soc->hal_soc,
  1751. soc->wbm_idle_link_ring.hal_srng);
  1752. } else {
  1753. uint32_t num_scatter_bufs;
  1754. uint32_t num_entries_per_buf;
  1755. uint32_t rem_entries;
  1756. uint8_t *scatter_buf_ptr;
  1757. uint16_t scatter_buf_num;
  1758. uint32_t buf_size = 0;
  1759. soc->wbm_idle_scatter_buf_size =
  1760. hal_idle_list_scatter_buf_size(soc->hal_soc);
  1761. num_entries_per_buf = hal_idle_scatter_buf_num_entries(
  1762. soc->hal_soc, soc->wbm_idle_scatter_buf_size);
  1763. num_scatter_bufs = hal_idle_list_num_scatter_bufs(
  1764. soc->hal_soc, total_mem_size,
  1765. soc->wbm_idle_scatter_buf_size);
  1766. if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
  1767. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1768. FL("scatter bufs size out of bounds"));
  1769. goto fail;
  1770. }
  1771. for (i = 0; i < num_scatter_bufs; i++) {
  1772. baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
  1773. if (!soc->dp_soc_reinit) {
  1774. buf_size = soc->wbm_idle_scatter_buf_size;
  1775. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  1776. qdf_mem_alloc_consistent(soc->osdev,
  1777. soc->osdev->
  1778. dev,
  1779. buf_size,
  1780. baseaddr);
  1781. }
  1782. if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
  1783. QDF_TRACE(QDF_MODULE_ID_DP,
  1784. QDF_TRACE_LEVEL_ERROR,
  1785. FL("Scatter lst memory alloc fail"));
  1786. goto fail;
  1787. }
  1788. }
  1789. /* Populate idle list scatter buffers with link descriptor
  1790. * pointers
  1791. */
  1792. scatter_buf_num = 0;
  1793. scatter_buf_ptr = (uint8_t *)(
  1794. soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
  1795. rem_entries = num_entries_per_buf;
  1796. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1797. soc->link_desc_banks[i].base_paddr; i++) {
  1798. uint32_t num_link_descs =
  1799. (soc->link_desc_banks[i].size -
  1800. ((unsigned long)(
  1801. soc->link_desc_banks[i].base_vaddr) -
  1802. (unsigned long)(
  1803. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1804. / link_desc_size;
  1805. unsigned long paddr = (unsigned long)(
  1806. soc->link_desc_banks[i].base_paddr);
  1807. while (num_link_descs) {
  1808. hal_set_link_desc_addr((void *)scatter_buf_ptr,
  1809. LINK_DESC_COOKIE(desc_id, i), paddr);
  1810. num_link_descs--;
  1811. desc_id++;
  1812. paddr += link_desc_size;
  1813. rem_entries--;
  1814. if (rem_entries) {
  1815. scatter_buf_ptr += entry_size;
  1816. } else {
  1817. rem_entries = num_entries_per_buf;
  1818. scatter_buf_num++;
  1819. if (scatter_buf_num >= num_scatter_bufs)
  1820. break;
  1821. scatter_buf_ptr = (uint8_t *)(
  1822. soc->wbm_idle_scatter_buf_base_vaddr[
  1823. scatter_buf_num]);
  1824. }
  1825. }
  1826. }
  1827. /* Setup link descriptor idle list in HW */
  1828. hal_setup_link_idle_list(soc->hal_soc,
  1829. soc->wbm_idle_scatter_buf_base_paddr,
  1830. soc->wbm_idle_scatter_buf_base_vaddr,
  1831. num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
  1832. (uint32_t)(scatter_buf_ptr -
  1833. (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
  1834. scatter_buf_num-1])), total_link_descs);
  1835. }
  1836. return 0;
  1837. fail:
  1838. if (soc->wbm_idle_link_ring.hal_srng) {
  1839. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1840. WBM_IDLE_LINK, 0);
  1841. }
  1842. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1843. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1844. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1845. soc->wbm_idle_scatter_buf_size,
  1846. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1847. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1848. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  1849. }
  1850. }
  1851. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1852. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1853. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1854. soc->link_desc_banks[i].size,
  1855. soc->link_desc_banks[i].base_vaddr_unaligned,
  1856. soc->link_desc_banks[i].base_paddr_unaligned,
  1857. 0);
  1858. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  1859. }
  1860. }
  1861. return QDF_STATUS_E_FAILURE;
  1862. }
  1863. /*
1864. * Free the link descriptor pool that was set up for HW use
  1865. */
  1866. static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
  1867. {
  1868. int i;
  1869. if (soc->wbm_idle_link_ring.hal_srng) {
  1870. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1871. WBM_IDLE_LINK, 0);
  1872. }
  1873. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1874. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1875. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1876. soc->wbm_idle_scatter_buf_size,
  1877. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1878. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1879. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  1880. }
  1881. }
  1882. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1883. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1884. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1885. soc->link_desc_banks[i].size,
  1886. soc->link_desc_banks[i].base_vaddr_unaligned,
  1887. soc->link_desc_banks[i].base_paddr_unaligned,
  1888. 0);
  1889. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  1890. }
  1891. }
  1892. }
  1893. #ifdef IPA_OFFLOAD
  1894. #define REO_DST_RING_SIZE_QCA6290 1023
  1895. #ifndef QCA_WIFI_QCA8074_VP
  1896. #define REO_DST_RING_SIZE_QCA8074 1023
  1897. #else
  1898. #define REO_DST_RING_SIZE_QCA8074 8
  1899. #endif /* QCA_WIFI_QCA8074_VP */
  1900. #else
  1901. #define REO_DST_RING_SIZE_QCA6290 1024
  1902. #ifndef QCA_WIFI_QCA8074_VP
  1903. #define REO_DST_RING_SIZE_QCA8074 2048
  1904. #else
  1905. #define REO_DST_RING_SIZE_QCA8074 8
  1906. #endif /* QCA_WIFI_QCA8074_VP */
  1907. #endif /* IPA_OFFLOAD */
  1908. /*
  1909. * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
  1910. * @soc: Datapath SOC handle
  1911. *
  1912. * This is a timer function used to age out stale AST nodes from
  1913. * AST table
  1914. */
  1915. #ifdef FEATURE_WDS
  1916. static void dp_ast_aging_timer_fn(void *soc_hdl)
  1917. {
  1918. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  1919. struct dp_pdev *pdev;
  1920. struct dp_vdev *vdev;
  1921. struct dp_peer *peer;
  1922. struct dp_ast_entry *ase, *temp_ase;
  1923. int i;
  1924. bool check_wds_ase = false;
  1925. if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
  1926. soc->wds_ast_aging_timer_cnt = 0;
  1927. check_wds_ase = true;
  1928. }
  1929. /* Peer list access lock */
  1930. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1931. /* AST list access lock */
  1932. qdf_spin_lock_bh(&soc->ast_lock);
  1933. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  1934. pdev = soc->pdev_list[i];
  1935. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  1936. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  1937. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  1938. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  1939. /*
  1940. * Do not expire static ast entries
  1941. * and HM WDS entries
  1942. */
  1943. if (ase->type !=
  1944. CDP_TXRX_AST_TYPE_WDS &&
  1945. ase->type !=
  1946. CDP_TXRX_AST_TYPE_MEC &&
  1947. ase->type !=
  1948. CDP_TXRX_AST_TYPE_DA)
  1949. continue;
1950. /* Expire MEC entries on every pass.
1951. * A MEC entry must be aged out in
1952. * case the STA backbone is turned
1953. * into an AP backbone; it then needs
1954. * to be re-added as a WDS entry.
  1955. */
  1956. if (ase->is_active && ase->type ==
  1957. CDP_TXRX_AST_TYPE_MEC) {
  1958. ase->is_active = FALSE;
  1959. continue;
  1960. } else if (ase->is_active &&
  1961. check_wds_ase) {
  1962. ase->is_active = FALSE;
  1963. continue;
  1964. }
  1965. if (ase->type ==
  1966. CDP_TXRX_AST_TYPE_MEC) {
  1967. DP_STATS_INC(soc,
  1968. ast.aged_out, 1);
  1969. dp_peer_del_ast(soc, ase);
  1970. } else if (check_wds_ase) {
  1971. DP_STATS_INC(soc,
  1972. ast.aged_out, 1);
  1973. dp_peer_del_ast(soc, ase);
  1974. }
  1975. }
  1976. }
  1977. }
  1978. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1979. }
  1980. qdf_spin_unlock_bh(&soc->ast_lock);
  1981. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1982. if (qdf_atomic_read(&soc->cmn_init_done))
  1983. qdf_timer_mod(&soc->ast_aging_timer,
  1984. DP_AST_AGING_TIMER_DEFAULT_MS);
  1985. }
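/*
 * Illustrative aging timeline (the cadence constants come from this file,
 * the traffic pattern and helper name are assumptions): a WDS AST entry
 * that stops receiving traffic is first marked is_active = FALSE on one
 * qualifying pass of dp_ast_aging_timer_fn() (every
 * DP_WDS_AST_AGING_TIMER_CNT expirations for WDS/DA entries, every
 * expiration for MEC entries) and is deleted on the next qualifying pass
 * if nothing re-activated it in between.
 */
#if 0
static void dp_ast_aging_example(struct dp_soc *soc, struct dp_ast_entry *ase)
{
	if (ase->is_active)
		ase->is_active = FALSE;		/* pass N: age, keep entry */
	else
		dp_peer_del_ast(soc, ase);	/* pass N+1: still idle, delete */
}
#endif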
  1986. /*
  1987. * dp_soc_wds_attach() - Setup WDS timer and AST table
  1988. * @soc: Datapath SOC handle
  1989. *
  1990. * Return: None
  1991. */
  1992. static void dp_soc_wds_attach(struct dp_soc *soc)
  1993. {
  1994. soc->wds_ast_aging_timer_cnt = 0;
  1995. qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
  1996. dp_ast_aging_timer_fn, (void *)soc,
  1997. QDF_TIMER_TYPE_WAKE_APPS);
  1998. qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
  1999. }
  2000. /*
  2001. * dp_soc_wds_detach() - Detach WDS data structures and timers
2002. * @soc: Datapath SOC handle
  2003. *
  2004. * Return: None
  2005. */
  2006. static void dp_soc_wds_detach(struct dp_soc *soc)
  2007. {
  2008. qdf_timer_stop(&soc->ast_aging_timer);
  2009. qdf_timer_free(&soc->ast_aging_timer);
  2010. }
  2011. #else
  2012. static void dp_soc_wds_attach(struct dp_soc *soc)
  2013. {
  2014. }
  2015. static void dp_soc_wds_detach(struct dp_soc *soc)
  2016. {
  2017. }
  2018. #endif
  2019. /*
2020. * dp_soc_reset_cpu_ring_map() - Reset CPU ring map
2021. * @soc: Datapath SOC handle
2022. *
2023. * This API resets the default CPU ring map
  2024. */
  2025. static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
  2026. {
  2027. uint8_t i;
  2028. int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2029. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  2030. switch (nss_config) {
  2031. case dp_nss_cfg_first_radio:
  2032. /*
  2033. * Setting Tx ring map for one nss offloaded radio
  2034. */
  2035. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
  2036. break;
  2037. case dp_nss_cfg_second_radio:
  2038. /*
2039. * Setting Tx ring map for the second nss offloaded radio
  2040. */
  2041. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
  2042. break;
  2043. case dp_nss_cfg_dbdc:
  2044. /*
  2045. * Setting Tx ring map for 2 nss offloaded radios
  2046. */
  2047. soc->tx_ring_map[i] =
  2048. dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
  2049. break;
  2050. case dp_nss_cfg_dbtc:
  2051. /*
  2052. * Setting Tx ring map for 3 nss offloaded radios
  2053. */
  2054. soc->tx_ring_map[i] =
  2055. dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
  2056. break;
  2057. default:
  2058. dp_err("tx_ring_map failed due to invalid nss cfg");
  2059. break;
  2060. }
  2061. }
  2062. }
  2063. /*
  2064. * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2065. * @soc: DP SOC handle
2066. * @ring_type: ring type
2067. * @ring_num: ring number
2068. *
2069. * Return: non-zero if the ring is offloaded to NSS, zero otherwise
  2070. */
  2071. static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
  2072. {
  2073. uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2074. uint8_t status = 0;
  2075. switch (ring_type) {
  2076. case WBM2SW_RELEASE:
  2077. case REO_DST:
  2078. case RXDMA_BUF:
  2079. status = ((nss_config) & (1 << ring_num));
  2080. break;
  2081. default:
  2082. break;
  2083. }
  2084. return status;
  2085. }
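/*
 * Editor's sketch (hypothetical usage, not part of the driver): the helper
 * above treats the per-SOC nss_config as a per-ring bitmap, so callers that
 * must skip NSS-owned rings only need to test its return value, e.g.:
 */
static inline bool dp_reo_ring_owned_by_host_example(struct dp_soc *soc,
						     int reo_ring_num)
{
	/* non-zero return means the ring is serviced by NSS, not the host */
	return !dp_soc_ring_if_nss_offloaded(soc, REO_DST, reo_ring_num);
}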
  2086. /*
  2087. * dp_soc_reset_intr_mask() - reset interrupt mask
2088. * @soc: DP SOC handle
2089. *
2090. * Return: void
  2091. */
  2092. static void dp_soc_reset_intr_mask(struct dp_soc *soc)
  2093. {
  2094. uint8_t j;
  2095. int *grp_mask = NULL;
  2096. int group_number, mask, num_ring;
  2097. /* number of tx ring */
  2098. num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  2099. /*
  2100. * group mask for tx completion ring.
  2101. */
  2102. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  2103. /* loop and reset the mask for only offloaded ring */
  2104. for (j = 0; j < num_ring; j++) {
  2105. if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
  2106. continue;
  2107. }
  2108. /*
  2109. * Group number corresponding to tx offloaded ring.
  2110. */
  2111. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2112. if (group_number < 0) {
  2113. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2114. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2115. WBM2SW_RELEASE, j);
  2116. return;
  2117. }
  2118. /* reset the tx mask for offloaded ring */
  2119. mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2120. mask &= (~(1 << j));
  2121. /*
  2122. * reset the interrupt mask for offloaded ring.
  2123. */
  2124. wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2125. }
  2126. /* number of rx rings */
  2127. num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  2128. /*
  2129. * group mask for reo destination ring.
  2130. */
  2131. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  2132. /* loop and reset the mask for only offloaded ring */
  2133. for (j = 0; j < num_ring; j++) {
  2134. if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
  2135. continue;
  2136. }
  2137. /*
  2138. * Group number corresponding to rx offloaded ring.
  2139. */
  2140. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2141. if (group_number < 0) {
  2142. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2143. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2144. REO_DST, j);
  2145. return;
  2146. }
2147. /* reset the rx mask for the offloaded ring */
  2148. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2149. mask &= (~(1 << j));
  2150. /*
2151. * clear the interrupt mask bit for the rx offloaded ring.
  2152. */
  2153. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2154. }
  2155. /*
  2156. * group mask for Rx buffer refill ring
  2157. */
  2158. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  2159. /* loop and reset the mask for only offloaded ring */
  2160. for (j = 0; j < MAX_PDEV_CNT; j++) {
  2161. if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
  2162. continue;
  2163. }
  2164. /*
  2165. * Group number corresponding to rx offloaded ring.
  2166. */
  2167. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2168. if (group_number < 0) {
  2169. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2170. FL("ring not part of any group; ring_type: %d,ring_num %d"),
2171. RXDMA_BUF, j);
  2172. return;
  2173. }
2174. /* reset the host2rxdma mask for the offloaded ring */
  2175. mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2176. group_number);
  2177. mask &= (~(1 << j));
  2178. /*
2179. * clear the interrupt mask bit for the rx offloaded ring.
  2180. */
  2181. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2182. group_number, mask);
  2183. }
  2184. }
  2185. #ifdef IPA_OFFLOAD
  2186. /**
2187. * dp_reo_remap_config() - configure reo remap register value based
2188. * on nss configuration.
2189. * Based on the offload_radio value below, the remap configuration is applied:
2190. * 0 - both radios handled by host (remap rings 1, 2, 3 & 4)
2191. * 1 - 1st radio handled by NSS (remap rings 2, 3 & 4)
2192. * 2 - 2nd radio handled by NSS (remap rings 1, 3 & 4)
2193. * 3 - both radios handled by NSS (remap not required)
2194. * 4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
2195. *
2196. * @soc: Datapath SOC handle
2197. * @remap1: output parameter indicating reo remap 1 register value
2198. * @remap2: output parameter indicating reo remap 2 register value
2199. * Return: bool, true if remap is configured, else false.
  2200. */
  2201. static bool dp_reo_remap_config(struct dp_soc *soc,
  2202. uint32_t *remap1,
  2203. uint32_t *remap2)
  2204. {
  2205. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
  2206. (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
  2207. *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
  2208. (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
  2209. dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
  2210. return true;
  2211. }
  2212. #else
  2213. static bool dp_reo_remap_config(struct dp_soc *soc,
  2214. uint32_t *remap1,
  2215. uint32_t *remap2)
  2216. {
  2217. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2218. switch (offload_radio) {
  2219. case dp_nss_cfg_default:
  2220. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  2221. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  2222. (0x3 << 18) | (0x4 << 21)) << 8;
  2223. *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  2224. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  2225. (0x3 << 18) | (0x4 << 21)) << 8;
  2226. break;
  2227. case dp_nss_cfg_first_radio:
  2228. *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
  2229. (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
  2230. (0x2 << 18) | (0x3 << 21)) << 8;
  2231. *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
  2232. (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
  2233. (0x4 << 18) | (0x2 << 21)) << 8;
  2234. break;
  2235. case dp_nss_cfg_second_radio:
  2236. *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
  2237. (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
  2238. (0x1 << 18) | (0x3 << 21)) << 8;
  2239. *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
  2240. (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
  2241. (0x4 << 18) | (0x1 << 21)) << 8;
  2242. break;
  2243. case dp_nss_cfg_dbdc:
  2244. case dp_nss_cfg_dbtc:
  2245. /* return false if both or all are offloaded to NSS */
  2246. return false;
  2247. }
  2248. dp_debug("remap1 %x remap2 %x offload_radio %u",
  2249. *remap1, *remap2, offload_radio);
  2250. return true;
  2251. }
  2252. #endif
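/*
 * Editor's sketch (hypothetical helper, not part of the driver): both
 * variants of dp_reo_remap_config() above build each remap word from eight
 * 3-bit REO destination ring IDs placed at bit offsets 0, 3, ... 21 and then
 * shifted left by 8. The hard-coded constants are equivalent to:
 */
static inline uint32_t dp_reo_remap_pack_example(const uint8_t ring_id[8])
{
	uint32_t word = 0;
	int i;

	for (i = 0; i < 8; i++)
		word |= ((uint32_t)(ring_id[i] & 0x7)) << (3 * i);

	/* match the << 8 applied to every literal value above */
	return word << 8;
}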
  2253. /*
  2254. * dp_reo_frag_dst_set() - configure reo register to set the
  2255. * fragment destination ring
2256. * @soc: Datapath SOC handle
2257. * @frag_dst_ring: output parameter for the fragment destination ring
2258. *
2259. * Based on offload_radio, the fragment destination ring is selected:
  2260. * 0 - TCL
  2261. * 1 - SW1
  2262. * 2 - SW2
  2263. * 3 - SW3
  2264. * 4 - SW4
  2265. * 5 - Release
  2266. * 6 - FW
  2267. * 7 - alternate select
  2268. *
2269. * Return: void
  2270. */
  2271. static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
  2272. {
  2273. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2274. switch (offload_radio) {
  2275. case dp_nss_cfg_default:
  2276. *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
  2277. break;
  2278. case dp_nss_cfg_dbdc:
  2279. case dp_nss_cfg_dbtc:
  2280. *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
  2281. break;
  2282. default:
  2283. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2284. FL("dp_reo_frag_dst_set invalid offload radio config"));
  2285. break;
  2286. }
  2287. }
  2288. /*
2289. * dp_soc_cmn_setup() - Common SoC level initialization
  2290. * @soc: Datapath SOC handle
  2291. *
  2292. * This is an internal function used to setup common SOC data structures,
  2293. * to be called from PDEV attach after receiving HW mode capabilities from FW
  2294. */
  2295. static int dp_soc_cmn_setup(struct dp_soc *soc)
  2296. {
  2297. int i;
  2298. struct hal_reo_params reo_params;
  2299. int tx_ring_size;
  2300. int tx_comp_ring_size;
  2301. int reo_dst_ring_size;
  2302. uint32_t entries;
  2303. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2304. if (qdf_atomic_read(&soc->cmn_init_done))
  2305. return 0;
  2306. if (dp_hw_link_desc_pool_setup(soc))
  2307. goto fail1;
  2308. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2309. /* Setup SRNG rings */
  2310. /* Common rings */
  2311. if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
  2312. wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
  2313. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2314. FL("dp_srng_setup failed for wbm_desc_rel_ring"));
  2315. goto fail1;
  2316. }
  2317. soc->num_tcl_data_rings = 0;
  2318. /* Tx data rings */
  2319. if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
  2320. soc->num_tcl_data_rings =
  2321. wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
  2322. tx_comp_ring_size =
  2323. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2324. tx_ring_size =
  2325. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2326. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  2327. if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
  2328. TCL_DATA, i, 0, tx_ring_size)) {
  2329. QDF_TRACE(QDF_MODULE_ID_DP,
  2330. QDF_TRACE_LEVEL_ERROR,
  2331. FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
  2332. goto fail1;
  2333. }
  2334. /*
  2335. * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
  2336. * count
  2337. */
  2338. if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
  2339. WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
  2340. QDF_TRACE(QDF_MODULE_ID_DP,
  2341. QDF_TRACE_LEVEL_ERROR,
  2342. FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
  2343. goto fail1;
  2344. }
  2345. }
  2346. } else {
  2347. /* This will be incremented during per pdev ring setup */
  2348. soc->num_tcl_data_rings = 0;
  2349. }
  2350. if (dp_tx_soc_attach(soc)) {
  2351. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2352. FL("dp_tx_soc_attach failed"));
  2353. goto fail1;
  2354. }
  2355. entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
  2356. /* TCL command and status rings */
  2357. if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
  2358. entries)) {
  2359. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2360. FL("dp_srng_setup failed for tcl_cmd_ring"));
  2361. goto fail1;
  2362. }
  2363. entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
  2364. if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
  2365. entries)) {
  2366. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2367. FL("dp_srng_setup failed for tcl_status_ring"));
  2368. goto fail1;
  2369. }
  2370. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2371. /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
  2372. * descriptors
  2373. */
  2374. /* Rx data rings */
  2375. if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2376. soc->num_reo_dest_rings =
  2377. wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
  2378. QDF_TRACE(QDF_MODULE_ID_DP,
  2379. QDF_TRACE_LEVEL_INFO,
  2380. FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
  2381. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  2382. if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
  2383. i, 0, reo_dst_ring_size)) {
  2384. QDF_TRACE(QDF_MODULE_ID_DP,
  2385. QDF_TRACE_LEVEL_ERROR,
  2386. FL(RNG_ERR "reo_dest_ring [%d]"), i);
  2387. goto fail1;
  2388. }
  2389. }
  2390. } else {
  2391. /* This will be incremented during per pdev ring setup */
  2392. soc->num_reo_dest_rings = 0;
  2393. }
  2394. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2395. /* LMAC RxDMA to SW Rings configuration */
  2396. if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
  2397. /* Only valid for MCL */
  2398. struct dp_pdev *pdev = soc->pdev_list[0];
  2399. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
  2400. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
  2401. RXDMA_DST, 0, i,
  2402. entries)) {
  2403. QDF_TRACE(QDF_MODULE_ID_DP,
  2404. QDF_TRACE_LEVEL_ERROR,
  2405. FL(RNG_ERR "rxdma_err_dst_ring"));
  2406. goto fail1;
  2407. }
  2408. }
  2409. }
  2410. /* TBD: call dp_rx_init to setup Rx SW descriptors */
  2411. /* REO reinjection ring */
  2412. entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
  2413. if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
  2414. entries)) {
  2415. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2416. FL("dp_srng_setup failed for reo_reinject_ring"));
  2417. goto fail1;
  2418. }
  2419. /* Rx release ring */
  2420. if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
  2421. wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
  2422. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2423. FL("dp_srng_setup failed for rx_rel_ring"));
  2424. goto fail1;
  2425. }
  2426. /* Rx exception ring */
  2427. entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
  2428. if (dp_srng_setup(soc, &soc->reo_exception_ring,
  2429. REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
  2430. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2431. FL("dp_srng_setup failed for reo_exception_ring"));
  2432. goto fail1;
  2433. }
  2434. /* REO command and status rings */
  2435. if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
  2436. wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
  2437. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2438. FL("dp_srng_setup failed for reo_cmd_ring"));
  2439. goto fail1;
  2440. }
  2441. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  2442. TAILQ_INIT(&soc->rx.reo_cmd_list);
  2443. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  2444. if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
  2445. wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
  2446. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2447. FL("dp_srng_setup failed for reo_status_ring"));
  2448. goto fail1;
  2449. }
  2450. /* Reset the cpu ring map if radio is NSS offloaded */
  2451. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
  2452. dp_soc_reset_cpu_ring_map(soc);
  2453. dp_soc_reset_intr_mask(soc);
  2454. }
  2455. /* Setup HW REO */
  2456. qdf_mem_zero(&reo_params, sizeof(reo_params));
  2457. if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
  2458. /*
  2459. * Reo ring remap is not required if both radios
  2460. * are offloaded to NSS
  2461. */
  2462. if (!dp_reo_remap_config(soc,
  2463. &reo_params.remap1,
  2464. &reo_params.remap2))
  2465. goto out;
  2466. reo_params.rx_hash_enabled = true;
  2467. }
  2468. /* setup the global rx defrag waitlist */
  2469. TAILQ_INIT(&soc->rx.defrag.waitlist);
  2470. soc->rx.defrag.timeout_ms =
  2471. wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
  2472. soc->rx.defrag.next_flush_ms = 0;
  2473. soc->rx.flags.defrag_timeout_check =
  2474. wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
  2475. qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
  2476. out:
  2477. /*
  2478. * set the fragment destination ring
  2479. */
  2480. dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
  2481. hal_reo_setup(soc->hal_soc, &reo_params);
  2482. qdf_atomic_set(&soc->cmn_init_done, 1);
  2483. dp_soc_wds_attach(soc);
  2484. qdf_nbuf_queue_init(&soc->htt_stats.msg);
  2485. return 0;
  2486. fail1:
  2487. /*
  2488. * Cleanup will be done as part of soc_detach, which will
  2489. * be called on pdev attach failure
  2490. */
  2491. return QDF_STATUS_E_FAILURE;
  2492. }
  2493. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
  2494. static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2495. {
  2496. struct cdp_lro_hash_config lro_hash;
  2497. QDF_STATUS status;
  2498. if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  2499. !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
  2500. !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  2501. dp_err("LRO, GRO and RX hash disabled");
  2502. return QDF_STATUS_E_FAILURE;
  2503. }
  2504. qdf_mem_zero(&lro_hash, sizeof(lro_hash));
  2505. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
  2506. wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
  2507. lro_hash.lro_enable = 1;
  2508. lro_hash.tcp_flag = QDF_TCPHDR_ACK;
  2509. lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
  2510. QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
  2511. QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
  2512. }
  2513. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
  2514. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2515. LRO_IPV4_SEED_ARR_SZ));
  2516. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
  2517. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2518. LRO_IPV6_SEED_ARR_SZ));
  2519. qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
  2520. if (!soc->cdp_soc.ol_ops->lro_hash_config) {
  2521. QDF_BUG(0);
  2522. dp_err("lro_hash_config not configured");
  2523. return QDF_STATUS_E_FAILURE;
  2524. }
  2525. status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
  2526. &lro_hash);
  2527. if (!QDF_IS_STATUS_SUCCESS(status)) {
  2528. dp_err("failed to send lro_hash_config to FW %u", status);
  2529. return status;
  2530. }
  2531. dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
  2532. lro_hash.lro_enable, lro_hash.tcp_flag,
  2533. lro_hash.tcp_flag_mask);
  2534. dp_info("toeplitz_hash_ipv4:");
  2535. qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2536. (void *)lro_hash.toeplitz_hash_ipv4,
  2537. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2538. LRO_IPV4_SEED_ARR_SZ));
  2539. dp_info("toeplitz_hash_ipv6:");
  2540. qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2541. (void *)lro_hash.toeplitz_hash_ipv6,
  2542. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2543. LRO_IPV6_SEED_ARR_SZ));
  2544. return status;
  2545. }
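/*
 * Editor's sketch (illustrative only, not part of the driver; the exact FW
 * semantics are not defined in this file): a common reading of the
 * tcp_flag/tcp_flag_mask pair programmed above is that a segment is
 * LRO-eligible when its TCP flags, masked with tcp_flag_mask, equal
 * tcp_flag, i.e. a pure ACK with no FIN/SYN/RST/URG/ECE/CWR bits set.
 */
static inline bool dp_lro_flags_match_example(uint32_t pkt_tcp_flags,
					      uint32_t tcp_flag,
					      uint32_t tcp_flag_mask)
{
	return (pkt_tcp_flags & tcp_flag_mask) == tcp_flag;
}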
  2546. /*
  2547. * dp_rxdma_ring_setup() - configure the RX DMA rings
  2548. * @soc: data path SoC handle
  2549. * @pdev: Physical device handle
  2550. *
2551. * Return: QDF_STATUS_SUCCESS on success, error status on failure
  2552. */
  2553. #ifdef QCA_HOST2FW_RXBUF_RING
  2554. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2555. struct dp_pdev *pdev)
  2556. {
  2557. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2558. int max_mac_rings;
  2559. int i;
  2560. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2561. max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
  2562. for (i = 0; i < max_mac_rings; i++) {
  2563. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2564. "%s: pdev_id %d mac_id %d",
  2565. __func__, pdev->pdev_id, i);
  2566. if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
  2567. RXDMA_BUF, 1, i,
  2568. wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
  2569. QDF_TRACE(QDF_MODULE_ID_DP,
  2570. QDF_TRACE_LEVEL_ERROR,
  2571. FL("failed rx mac ring setup"));
  2572. return QDF_STATUS_E_FAILURE;
  2573. }
  2574. }
  2575. return QDF_STATUS_SUCCESS;
  2576. }
  2577. #else
  2578. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2579. struct dp_pdev *pdev)
  2580. {
  2581. return QDF_STATUS_SUCCESS;
  2582. }
  2583. #endif
  2584. /**
  2585. * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
  2586. * @pdev - DP_PDEV handle
  2587. *
  2588. * Return: void
  2589. */
  2590. static inline void
  2591. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  2592. {
  2593. uint8_t map_id;
  2594. struct dp_soc *soc = pdev->soc;
  2595. if (!soc)
  2596. return;
  2597. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  2598. qdf_mem_copy(pdev->dscp_tid_map[map_id],
  2599. default_dscp_tid_map,
  2600. sizeof(default_dscp_tid_map));
  2601. }
  2602. for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
  2603. hal_tx_set_dscp_tid_map(soc->hal_soc,
  2604. default_dscp_tid_map,
  2605. map_id);
  2606. }
  2607. }
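/*
 * Editor's sketch (hypothetical helper, not part of the driver): with the
 * maps programmed above, resolving a TID for a frame is a table lookup
 * keyed by map id and the 6-bit DSCP value; the HW performs the equivalent
 * lookup using the maps written via hal_tx_set_dscp_tid_map().
 */
static inline uint8_t dp_dscp_to_tid_example(struct dp_pdev *pdev,
					     uint8_t map_id, uint8_t dscp)
{
	return pdev->dscp_tid_map[map_id][dscp & 0x3f];
}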
  2608. #ifdef IPA_OFFLOAD
  2609. /**
  2610. * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
  2611. * @soc: data path instance
  2612. * @pdev: core txrx pdev context
  2613. *
  2614. * Return: QDF_STATUS_SUCCESS: success
2615. * QDF_STATUS_E_FAILURE: error
  2616. */
  2617. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2618. struct dp_pdev *pdev)
  2619. {
  2620. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2621. int entries;
  2622. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2623. entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
  2624. /* Setup second Rx refill buffer ring */
  2625. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2626. IPA_RX_REFILL_BUF_RING_IDX,
  2627. pdev->pdev_id,
  2628. entries)) {
  2629. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2630. FL("dp_srng_setup failed second rx refill ring"));
  2631. return QDF_STATUS_E_FAILURE;
  2632. }
  2633. return QDF_STATUS_SUCCESS;
  2634. }
  2635. /**
  2636. * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
  2637. * @soc: data path instance
  2638. * @pdev: core txrx pdev context
  2639. *
  2640. * Return: void
  2641. */
  2642. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2643. struct dp_pdev *pdev)
  2644. {
  2645. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2646. IPA_RX_REFILL_BUF_RING_IDX);
  2647. }
  2648. #else
  2649. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2650. struct dp_pdev *pdev)
  2651. {
  2652. return QDF_STATUS_SUCCESS;
  2653. }
  2654. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2655. struct dp_pdev *pdev)
  2656. {
  2657. }
  2658. #endif
  2659. #if !defined(DISABLE_MON_CONFIG)
  2660. /**
  2661. * dp_mon_rings_setup() - Initialize Monitor rings based on target
  2662. * @soc: soc handle
  2663. * @pdev: physical device handle
  2664. *
  2665. * Return: nonzero on failure and zero on success
  2666. */
  2667. static
  2668. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2669. {
  2670. int mac_id = 0;
  2671. int pdev_id = pdev->pdev_id;
  2672. int entries;
  2673. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2674. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2675. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  2676. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  2677. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  2678. entries =
  2679. wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
  2680. if (dp_srng_setup(soc,
  2681. &pdev->rxdma_mon_buf_ring[mac_id],
  2682. RXDMA_MONITOR_BUF, 0, mac_for_pdev,
  2683. entries)) {
  2684. QDF_TRACE(QDF_MODULE_ID_DP,
  2685. QDF_TRACE_LEVEL_ERROR,
  2686. FL(RNG_ERR "rxdma_mon_buf_ring "));
  2687. return QDF_STATUS_E_NOMEM;
  2688. }
  2689. entries =
  2690. wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
  2691. if (dp_srng_setup(soc,
  2692. &pdev->rxdma_mon_dst_ring[mac_id],
  2693. RXDMA_MONITOR_DST, 0, mac_for_pdev,
  2694. entries)) {
  2695. QDF_TRACE(QDF_MODULE_ID_DP,
  2696. QDF_TRACE_LEVEL_ERROR,
  2697. FL(RNG_ERR "rxdma_mon_dst_ring"));
  2698. return QDF_STATUS_E_NOMEM;
  2699. }
  2700. entries =
  2701. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2702. if (dp_srng_setup(soc,
  2703. &pdev->rxdma_mon_status_ring[mac_id],
  2704. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2705. entries)) {
  2706. QDF_TRACE(QDF_MODULE_ID_DP,
  2707. QDF_TRACE_LEVEL_ERROR,
  2708. FL(RNG_ERR "rxdma_mon_status_ring"));
  2709. return QDF_STATUS_E_NOMEM;
  2710. }
  2711. entries =
  2712. wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
  2713. if (dp_srng_setup(soc,
  2714. &pdev->rxdma_mon_desc_ring[mac_id],
  2715. RXDMA_MONITOR_DESC, 0, mac_for_pdev,
  2716. entries)) {
  2717. QDF_TRACE(QDF_MODULE_ID_DP,
  2718. QDF_TRACE_LEVEL_ERROR,
  2719. FL(RNG_ERR "rxdma_mon_desc_ring"));
  2720. return QDF_STATUS_E_NOMEM;
  2721. }
  2722. } else {
  2723. entries =
  2724. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2725. if (dp_srng_setup(soc,
  2726. &pdev->rxdma_mon_status_ring[mac_id],
  2727. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2728. entries)) {
  2729. QDF_TRACE(QDF_MODULE_ID_DP,
  2730. QDF_TRACE_LEVEL_ERROR,
  2731. FL(RNG_ERR "rxdma_mon_status_ring"));
  2732. return QDF_STATUS_E_NOMEM;
  2733. }
  2734. }
  2735. }
  2736. return QDF_STATUS_SUCCESS;
  2737. }
  2738. #else
  2739. static
  2740. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2741. {
  2742. return QDF_STATUS_SUCCESS;
  2743. }
  2744. #endif
2745. /* dp_iterate_update_peer_list() - update peer stats on cal client timer
  2746. * @pdev_hdl: pdev handle
  2747. */
  2748. #ifdef ATH_SUPPORT_EXT_STAT
  2749. void dp_iterate_update_peer_list(void *pdev_hdl)
  2750. {
  2751. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  2752. struct dp_soc *soc = pdev->soc;
  2753. struct dp_vdev *vdev = NULL;
  2754. struct dp_peer *peer = NULL;
  2755. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  2756. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  2757. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  2758. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  2759. dp_cal_client_update_peer_stats(&peer->stats);
  2760. }
  2761. }
  2762. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  2763. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2764. }
  2765. #else
  2766. void dp_iterate_update_peer_list(void *pdev_hdl)
  2767. {
  2768. }
  2769. #endif
  2770. /*
  2771. * dp_pdev_attach_wifi3() - attach txrx pdev
2772. * @txrx_soc: Datapath SOC handle
2773. * @ctrl_pdev: Opaque PDEV object
  2774. * @htc_handle: HTC handle for host-target interface
  2775. * @qdf_osdev: QDF OS device
  2776. * @pdev_id: PDEV ID
  2777. *
  2778. * Return: DP PDEV handle on success, NULL on failure
  2779. */
  2780. static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
  2781. struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
  2782. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
  2783. {
  2784. int tx_ring_size;
  2785. int tx_comp_ring_size;
  2786. int reo_dst_ring_size;
  2787. int entries;
  2788. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2789. int nss_cfg;
  2790. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  2791. struct dp_pdev *pdev = NULL;
  2792. if (soc->dp_soc_reinit)
  2793. pdev = soc->pdev_list[pdev_id];
  2794. else
  2795. pdev = qdf_mem_malloc(sizeof(*pdev));
  2796. if (!pdev) {
  2797. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2798. FL("DP PDEV memory allocation failed"));
  2799. goto fail0;
  2800. }
  2801. /*
  2802. * Variable to prevent double pdev deinitialization during
2803. * radio detach execution, i.e. in the absence of any vdev.
  2804. */
  2805. pdev->pdev_deinit = 0;
  2806. pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
  2807. if (!pdev->invalid_peer) {
  2808. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2809. FL("Invalid peer memory allocation failed"));
  2810. qdf_mem_free(pdev);
  2811. goto fail0;
  2812. }
  2813. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2814. pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
  2815. if (!pdev->wlan_cfg_ctx) {
  2816. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2817. FL("pdev cfg_attach failed"));
  2818. qdf_mem_free(pdev->invalid_peer);
  2819. qdf_mem_free(pdev);
  2820. goto fail0;
  2821. }
  2822. /*
  2823. * set nss pdev config based on soc config
  2824. */
  2825. nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
  2826. wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
  2827. (nss_cfg & (1 << pdev_id)));
  2828. pdev->soc = soc;
  2829. pdev->ctrl_pdev = ctrl_pdev;
  2830. pdev->pdev_id = pdev_id;
  2831. soc->pdev_list[pdev_id] = pdev;
  2832. pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
  2833. soc->pdev_count++;
  2834. TAILQ_INIT(&pdev->vdev_list);
  2835. qdf_spinlock_create(&pdev->vdev_list_lock);
  2836. pdev->vdev_count = 0;
  2837. qdf_spinlock_create(&pdev->tx_mutex);
  2838. qdf_spinlock_create(&pdev->neighbour_peer_mutex);
  2839. TAILQ_INIT(&pdev->neighbour_peers_list);
  2840. pdev->neighbour_peers_added = false;
  2841. pdev->monitor_configured = false;
  2842. if (dp_soc_cmn_setup(soc)) {
  2843. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2844. FL("dp_soc_cmn_setup failed"));
  2845. goto fail1;
  2846. }
  2847. /* Setup per PDEV TCL rings if configured */
  2848. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  2849. tx_ring_size =
  2850. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2851. tx_comp_ring_size =
  2852. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2853. if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
  2854. pdev_id, pdev_id, tx_ring_size)) {
  2855. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2856. FL("dp_srng_setup failed for tcl_data_ring"));
  2857. goto fail1;
  2858. }
  2859. if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
  2860. WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
  2861. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2862. FL("dp_srng_setup failed for tx_comp_ring"));
  2863. goto fail1;
  2864. }
  2865. soc->num_tcl_data_rings++;
  2866. }
  2867. /* Tx specific init */
  2868. if (dp_tx_pdev_attach(pdev)) {
  2869. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2870. FL("dp_tx_pdev_attach failed"));
  2871. goto fail1;
  2872. }
  2873. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2874. /* Setup per PDEV REO rings if configured */
  2875. if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2876. if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
  2877. pdev_id, pdev_id, reo_dst_ring_size)) {
  2878. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2879. FL("dp_srng_setup failed for reo_dest_ring"));
  2880. goto fail1;
  2881. }
  2882. soc->num_reo_dest_rings++;
  2883. }
  2884. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
  2885. wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
  2886. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2887. FL("dp_srng_setup failed rx refill ring"));
  2888. goto fail1;
  2889. }
  2890. if (dp_rxdma_ring_setup(soc, pdev)) {
  2891. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2892. FL("RXDMA ring config failed"));
  2893. goto fail1;
  2894. }
  2895. if (dp_mon_rings_setup(soc, pdev)) {
  2896. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2897. FL("MONITOR rings setup failed"));
  2898. goto fail1;
  2899. }
  2900. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2901. if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
  2902. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
  2903. 0, pdev_id,
  2904. entries)) {
  2905. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2906. FL(RNG_ERR "rxdma_err_dst_ring"));
  2907. goto fail1;
  2908. }
  2909. }
  2910. if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
  2911. goto fail1;
  2912. if (dp_ipa_ring_resource_setup(soc, pdev))
  2913. goto fail1;
  2914. if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
  2915. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2916. FL("dp_ipa_uc_attach failed"));
  2917. goto fail1;
  2918. }
  2919. /* Rx specific init */
  2920. if (dp_rx_pdev_attach(pdev)) {
  2921. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2922. FL("dp_rx_pdev_attach failed"));
  2923. goto fail1;
  2924. }
  2925. DP_STATS_INIT(pdev);
  2926. /* Monitor filter init */
  2927. pdev->mon_filter_mode = MON_FILTER_ALL;
  2928. pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
  2929. pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
  2930. pdev->fp_data_filter = FILTER_DATA_ALL;
  2931. pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
  2932. pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
  2933. pdev->mo_data_filter = FILTER_DATA_ALL;
  2934. dp_local_peer_id_pool_init(pdev);
  2935. dp_dscp_tid_map_setup(pdev);
  2936. /* Rx monitor mode specific init */
  2937. if (dp_rx_pdev_mon_attach(pdev)) {
  2938. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2939. "dp_rx_pdev_mon_attach failed");
  2940. goto fail1;
  2941. }
  2942. if (dp_wdi_event_attach(pdev)) {
  2943. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2944. "dp_wdi_evet_attach failed");
  2945. goto fail1;
  2946. }
  2947. /* set the reo destination during initialization */
  2948. pdev->reo_dest = pdev->pdev_id + 1;
  2949. /*
  2950. * initialize ppdu tlv list
  2951. */
  2952. TAILQ_INIT(&pdev->ppdu_info_list);
  2953. pdev->tlv_count = 0;
  2954. pdev->list_depth = 0;
  2955. qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
  2956. pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
  2957. sizeof(struct cdp_tx_sojourn_stats), 0, 4,
  2958. TRUE);
2959. /* initialize cal client timer */
  2960. dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
  2961. &dp_iterate_update_peer_list);
  2962. qdf_event_create(&pdev->fw_peer_stats_event);
  2963. return (struct cdp_pdev *)pdev;
  2964. fail1:
  2965. dp_pdev_detach((struct cdp_pdev *)pdev, 0);
  2966. fail0:
  2967. return NULL;
  2968. }
  2969. /*
2970. * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
  2971. * @soc: data path SoC handle
  2972. * @pdev: Physical device handle
  2973. *
  2974. * Return: void
  2975. */
  2976. #ifdef QCA_HOST2FW_RXBUF_RING
  2977. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2978. struct dp_pdev *pdev)
  2979. {
  2980. int max_mac_rings =
  2981. wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
  2982. int i;
  2983. max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
  2984. max_mac_rings : MAX_RX_MAC_RINGS;
2985. for (i = 0; i < max_mac_rings; i++)
  2986. dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
  2987. RXDMA_BUF, 1);
  2988. qdf_timer_free(&soc->mon_reap_timer);
  2989. }
  2990. #else
  2991. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2992. struct dp_pdev *pdev)
  2993. {
  2994. }
  2995. #endif
  2996. /*
2997. * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
  2998. * @pdev: device object
  2999. *
  3000. * Return: void
  3001. */
  3002. static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  3003. {
  3004. struct dp_neighbour_peer *peer = NULL;
  3005. struct dp_neighbour_peer *temp_peer = NULL;
  3006. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  3007. neighbour_peer_list_elem, temp_peer) {
  3008. /* delete this peer from the list */
  3009. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  3010. peer, neighbour_peer_list_elem);
  3011. qdf_mem_free(peer);
  3012. }
  3013. qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
  3014. }
  3015. /**
  3016. * dp_htt_ppdu_stats_detach() - detach stats resources
  3017. * @pdev: Datapath PDEV handle
  3018. *
  3019. * Return: void
  3020. */
  3021. static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
  3022. {
  3023. struct ppdu_info *ppdu_info, *ppdu_info_next;
  3024. TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
  3025. ppdu_info_list_elem, ppdu_info_next) {
  3026. if (!ppdu_info)
  3027. break;
  3028. qdf_assert_always(ppdu_info->nbuf);
  3029. qdf_nbuf_free(ppdu_info->nbuf);
  3030. qdf_mem_free(ppdu_info);
  3031. }
  3032. }
  3033. #if !defined(DISABLE_MON_CONFIG)
  3034. static
  3035. void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  3036. int mac_id)
  3037. {
  3038. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  3039. dp_srng_cleanup(soc,
  3040. &pdev->rxdma_mon_buf_ring[mac_id],
  3041. RXDMA_MONITOR_BUF, 0);
  3042. dp_srng_cleanup(soc,
  3043. &pdev->rxdma_mon_dst_ring[mac_id],
  3044. RXDMA_MONITOR_DST, 0);
  3045. dp_srng_cleanup(soc,
  3046. &pdev->rxdma_mon_status_ring[mac_id],
  3047. RXDMA_MONITOR_STATUS, 0);
  3048. dp_srng_cleanup(soc,
  3049. &pdev->rxdma_mon_desc_ring[mac_id],
  3050. RXDMA_MONITOR_DESC, 0);
  3051. dp_srng_cleanup(soc,
  3052. &pdev->rxdma_err_dst_ring[mac_id],
  3053. RXDMA_DST, 0);
  3054. } else {
  3055. dp_srng_cleanup(soc,
  3056. &pdev->rxdma_mon_status_ring[mac_id],
  3057. RXDMA_MONITOR_STATUS, 0);
  3058. dp_srng_cleanup(soc,
  3059. &pdev->rxdma_err_dst_ring[mac_id],
  3060. RXDMA_DST, 0);
  3061. }
  3062. }
  3063. #else
  3064. static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  3065. int mac_id)
  3066. {
  3067. }
  3068. #endif
  3069. /**
  3070. * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
  3071. *
  3072. * @soc: soc handle
  3073. * @pdev: datapath physical dev handle
  3074. * @mac_id: mac number
  3075. *
  3076. * Return: None
  3077. */
  3078. static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
  3079. int mac_id)
  3080. {
  3081. }
  3082. /**
  3083. * dp_pdev_mem_reset() - Reset txrx pdev memory
  3084. * @pdev: dp pdev handle
  3085. *
  3086. * Return: None
  3087. */
  3088. static void dp_pdev_mem_reset(struct dp_pdev *pdev)
  3089. {
  3090. uint16_t len = 0;
  3091. uint8_t *dp_pdev_offset = (uint8_t *)pdev;
  3092. len = sizeof(struct dp_pdev) -
  3093. offsetof(struct dp_pdev, pdev_deinit) -
  3094. sizeof(pdev->pdev_deinit);
  3095. dp_pdev_offset = dp_pdev_offset +
  3096. offsetof(struct dp_pdev, pdev_deinit) +
  3097. sizeof(pdev->pdev_deinit);
  3098. qdf_mem_zero(dp_pdev_offset, len);
  3099. }
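/*
 * Editor's sketch (hypothetical macro, not part of the driver): the reset
 * above is an instance of a generic "zero everything after a marker field"
 * pattern, so state declared up to and including the marker (pdev_deinit)
 * survives a re-init while everything after it is wiped. A generic form of
 * the same length/offset computation:
 */
#define DP_MEM_RESET_AFTER_EXAMPLE(obj, marker) \
	qdf_mem_zero((uint8_t *)(obj) + offsetof(typeof(*(obj)), marker) + \
		     sizeof((obj)->marker), \
		     sizeof(*(obj)) - offsetof(typeof(*(obj)), marker) - \
		     sizeof((obj)->marker))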
  3100. /**
  3101. * dp_pdev_deinit() - Deinit txrx pdev
  3102. * @txrx_pdev: Datapath PDEV handle
  3103. * @force: Force deinit
  3104. *
  3105. * Return: None
  3106. */
  3107. static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
  3108. {
  3109. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3110. struct dp_soc *soc = pdev->soc;
  3111. qdf_nbuf_t curr_nbuf, next_nbuf;
  3112. int mac_id;
  3113. /*
  3114. * Prevent double pdev deinitialization during radio detach
3115. * execution, i.e. in the absence of any vdev
  3116. */
  3117. if (pdev->pdev_deinit)
  3118. return;
  3119. pdev->pdev_deinit = 1;
  3120. dp_wdi_event_detach(pdev);
  3121. dp_tx_pdev_detach(pdev);
  3122. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3123. dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3124. TCL_DATA, pdev->pdev_id);
  3125. dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3126. WBM2SW_RELEASE, pdev->pdev_id);
  3127. }
  3128. dp_pktlogmod_exit(pdev);
  3129. dp_rx_pdev_detach(pdev);
  3130. dp_rx_pdev_mon_detach(pdev);
  3131. dp_neighbour_peers_detach(pdev);
  3132. qdf_spinlock_destroy(&pdev->tx_mutex);
  3133. qdf_spinlock_destroy(&pdev->vdev_list_lock);
  3134. dp_ipa_uc_detach(soc, pdev);
  3135. dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
  3136. /* Cleanup per PDEV REO rings if configured */
  3137. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3138. dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3139. REO_DST, pdev->pdev_id);
  3140. }
  3141. dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3142. dp_rxdma_ring_cleanup(soc, pdev);
  3143. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3144. dp_mon_ring_deinit(soc, pdev, mac_id);
  3145. dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3146. RXDMA_DST, 0);
  3147. }
  3148. curr_nbuf = pdev->invalid_peer_head_msdu;
  3149. while (curr_nbuf) {
  3150. next_nbuf = qdf_nbuf_next(curr_nbuf);
  3151. qdf_nbuf_free(curr_nbuf);
  3152. curr_nbuf = next_nbuf;
  3153. }
  3154. pdev->invalid_peer_head_msdu = NULL;
  3155. pdev->invalid_peer_tail_msdu = NULL;
  3156. dp_htt_ppdu_stats_detach(pdev);
  3157. qdf_nbuf_free(pdev->sojourn_buf);
  3158. dp_cal_client_detach(&pdev->cal_client_ctx);
  3159. soc->pdev_count--;
  3160. wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
  3161. qdf_mem_free(pdev->invalid_peer);
  3162. qdf_mem_free(pdev->dp_txrx_handle);
  3163. dp_pdev_mem_reset(pdev);
  3164. }
  3165. /**
  3166. * dp_pdev_deinit_wifi3() - Deinit txrx pdev
  3167. * @txrx_pdev: Datapath PDEV handle
  3168. * @force: Force deinit
  3169. *
  3170. * Return: None
  3171. */
  3172. static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
  3173. {
  3174. dp_pdev_deinit(txrx_pdev, force);
  3175. }
  3176. /*
  3177. * dp_pdev_detach() - Complete rest of pdev detach
  3178. * @txrx_pdev: Datapath PDEV handle
  3179. * @force: Force deinit
  3180. *
  3181. * Return: None
  3182. */
  3183. static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
  3184. {
  3185. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3186. struct dp_soc *soc = pdev->soc;
  3187. int mac_id;
  3188. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3189. dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3190. TCL_DATA, pdev->pdev_id);
  3191. dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3192. WBM2SW_RELEASE, pdev->pdev_id);
  3193. }
  3194. dp_mon_link_free(pdev);
  3195. /* Cleanup per PDEV REO rings if configured */
  3196. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3197. dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3198. REO_DST, pdev->pdev_id);
  3199. }
  3200. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3201. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3202. dp_mon_ring_cleanup(soc, pdev, mac_id);
  3203. dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3204. RXDMA_DST, 0);
  3205. }
  3206. soc->pdev_list[pdev->pdev_id] = NULL;
  3207. qdf_mem_free(pdev);
  3208. }
  3209. /*
  3210. * dp_pdev_detach_wifi3() - detach txrx pdev
  3211. * @txrx_pdev: Datapath PDEV handle
  3212. * @force: Force detach
  3213. *
  3214. * Return: None
  3215. */
  3216. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
  3217. {
  3218. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3219. struct dp_soc *soc = pdev->soc;
  3220. if (soc->dp_soc_reinit) {
  3221. dp_pdev_detach(txrx_pdev, force);
  3222. } else {
  3223. dp_pdev_deinit(txrx_pdev, force);
  3224. dp_pdev_detach(txrx_pdev, force);
  3225. }
  3226. }
  3227. /*
  3228. * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  3229. * @soc: DP SOC handle
  3230. */
  3231. static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  3232. {
  3233. struct reo_desc_list_node *desc;
  3234. struct dp_rx_tid *rx_tid;
  3235. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  3236. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  3237. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  3238. rx_tid = &desc->rx_tid;
  3239. qdf_mem_unmap_nbytes_single(soc->osdev,
  3240. rx_tid->hw_qdesc_paddr,
  3241. QDF_DMA_BIDIRECTIONAL,
  3242. rx_tid->hw_qdesc_alloc_size);
  3243. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  3244. qdf_mem_free(desc);
  3245. }
  3246. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  3247. qdf_list_destroy(&soc->reo_desc_freelist);
  3248. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  3249. }
  3250. /**
  3251. * dp_soc_mem_reset() - Reset Dp Soc memory
  3252. * @soc: DP handle
  3253. *
  3254. * Return: None
  3255. */
  3256. static void dp_soc_mem_reset(struct dp_soc *soc)
  3257. {
  3258. uint16_t len = 0;
  3259. uint8_t *dp_soc_offset = (uint8_t *)soc;
  3260. len = sizeof(struct dp_soc) -
  3261. offsetof(struct dp_soc, dp_soc_reinit) -
  3262. sizeof(soc->dp_soc_reinit);
  3263. dp_soc_offset = dp_soc_offset +
  3264. offsetof(struct dp_soc, dp_soc_reinit) +
  3265. sizeof(soc->dp_soc_reinit);
  3266. qdf_mem_zero(dp_soc_offset, len);
  3267. }
  3268. /**
  3269. * dp_soc_deinit() - Deinitialize txrx SOC
  3270. * @txrx_soc: Opaque DP SOC handle
  3271. *
  3272. * Return: None
  3273. */
  3274. static void dp_soc_deinit(void *txrx_soc)
  3275. {
  3276. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3277. int i;
  3278. qdf_atomic_set(&soc->cmn_init_done, 0);
  3279. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3280. if (soc->pdev_list[i])
  3281. dp_pdev_deinit((struct cdp_pdev *)
  3282. soc->pdev_list[i], 1);
  3283. }
  3284. qdf_flush_work(&soc->htt_stats.work);
  3285. qdf_disable_work(&soc->htt_stats.work);
  3286. /* Free pending htt stats messages */
  3287. qdf_nbuf_queue_free(&soc->htt_stats.msg);
  3288. dp_reo_cmdlist_destroy(soc);
  3289. dp_peer_find_detach(soc);
  3290. /* Free the ring memories */
  3291. /* Common rings */
  3292. dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3293. /* Tx data rings */
  3294. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3295. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3296. dp_srng_deinit(soc, &soc->tcl_data_ring[i],
  3297. TCL_DATA, i);
  3298. dp_srng_deinit(soc, &soc->tx_comp_ring[i],
  3299. WBM2SW_RELEASE, i);
  3300. }
  3301. }
  3302. /* TCL command and status rings */
  3303. dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3304. dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3305. /* Rx data rings */
  3306. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3307. soc->num_reo_dest_rings =
  3308. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3309. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3310. /* TODO: Get number of rings and ring sizes
  3311. * from wlan_cfg
  3312. */
  3313. dp_srng_deinit(soc, &soc->reo_dest_ring[i],
  3314. REO_DST, i);
  3315. }
  3316. }
  3317. /* REO reinjection ring */
  3318. dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3319. /* Rx release ring */
  3320. dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3321. /* Rx exception ring */
  3322. /* TODO: Better to store ring_type and ring_num in
  3323. * dp_srng during setup
  3324. */
  3325. dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3326. /* REO command and status rings */
  3327. dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3328. dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3329. dp_soc_wds_detach(soc);
  3330. qdf_spinlock_destroy(&soc->peer_ref_mutex);
  3331. qdf_spinlock_destroy(&soc->htt_stats.lock);
  3332. htt_soc_htc_dealloc(soc->htt_handle);
  3333. qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
  3334. dp_reo_cmdlist_destroy(soc);
  3335. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  3336. dp_reo_desc_freelist_destroy(soc);
  3337. qdf_spinlock_destroy(&soc->ast_lock);
  3338. dp_soc_mem_reset(soc);
  3339. }
  3340. /**
  3341. * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
  3342. * @txrx_soc: Opaque DP SOC handle
  3343. *
  3344. * Return: None
  3345. */
  3346. static void dp_soc_deinit_wifi3(void *txrx_soc)
  3347. {
  3348. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3349. soc->dp_soc_reinit = 1;
  3350. dp_soc_deinit(txrx_soc);
  3351. }
  3352. /*
  3353. * dp_soc_detach() - Detach rest of txrx SOC
  3354. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3355. *
  3356. * Return: None
  3357. */
  3358. static void dp_soc_detach(void *txrx_soc)
  3359. {
  3360. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3361. int i;
  3362. qdf_atomic_set(&soc->cmn_init_done, 0);
  3363. /* TBD: Call Tx and Rx cleanup functions to free buffers and
  3364. * SW descriptors
  3365. */
  3366. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3367. if (soc->pdev_list[i])
  3368. dp_pdev_detach((struct cdp_pdev *)
  3369. soc->pdev_list[i], 1);
  3370. }
  3371. /* Free the ring memories */
  3372. /* Common rings */
  3373. dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3374. dp_tx_soc_detach(soc);
  3375. /* Tx data rings */
  3376. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3377. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3378. dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
  3379. TCL_DATA, i);
  3380. dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
  3381. WBM2SW_RELEASE, i);
  3382. }
  3383. }
  3384. /* TCL command and status rings */
  3385. dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3386. dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3387. /* Rx data rings */
  3388. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3389. soc->num_reo_dest_rings =
  3390. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3391. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3392. /* TODO: Get number of rings and ring sizes
  3393. * from wlan_cfg
  3394. */
  3395. dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
  3396. REO_DST, i);
  3397. }
  3398. }
  3399. /* REO reinjection ring */
  3400. dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3401. /* Rx release ring */
  3402. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3403. /* Rx exception ring */
  3404. /* TODO: Better to store ring_type and ring_num in
  3405. * dp_srng during setup
  3406. */
  3407. dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3408. /* REO command and status rings */
  3409. dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3410. dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3411. dp_hw_link_desc_pool_cleanup(soc);
  3412. htt_soc_detach(soc->htt_handle);
  3413. soc->dp_soc_reinit = 0;
  3414. wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
  3415. qdf_mem_free(soc);
  3416. }
  3417. /*
  3418. * dp_soc_detach_wifi3() - Detach txrx SOC
  3419. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3420. *
  3421. * Return: None
  3422. */
  3423. static void dp_soc_detach_wifi3(void *txrx_soc)
  3424. {
  3425. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3426. if (soc->dp_soc_reinit) {
  3427. dp_soc_detach(txrx_soc);
  3428. } else {
  3429. dp_soc_deinit(txrx_soc);
  3430. dp_soc_detach(txrx_soc);
  3431. }
  3432. }
  3433. #if !defined(DISABLE_MON_CONFIG)
  3434. /**
  3435. * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
  3436. * @soc: soc handle
  3437. * @pdev: physical device handle
  3438. * @mac_id: ring number
  3439. * @mac_for_pdev: mac_id
  3440. *
  3441. * Return: non-zero for failure, zero for success
  3442. */
  3443. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3444. struct dp_pdev *pdev,
  3445. int mac_id,
  3446. int mac_for_pdev)
  3447. {
  3448. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3449. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  3450. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3451. pdev->rxdma_mon_buf_ring[mac_id]
  3452. .hal_srng,
  3453. RXDMA_MONITOR_BUF);
  3454. if (status != QDF_STATUS_SUCCESS) {
  3455. dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
  3456. return status;
  3457. }
  3458. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3459. pdev->rxdma_mon_dst_ring[mac_id]
  3460. .hal_srng,
  3461. RXDMA_MONITOR_DST);
  3462. if (status != QDF_STATUS_SUCCESS) {
  3463. dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
  3464. return status;
  3465. }
  3466. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3467. pdev->rxdma_mon_status_ring[mac_id]
  3468. .hal_srng,
  3469. RXDMA_MONITOR_STATUS);
  3470. if (status != QDF_STATUS_SUCCESS) {
  3471. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3472. return status;
  3473. }
  3474. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3475. pdev->rxdma_mon_desc_ring[mac_id]
  3476. .hal_srng,
  3477. RXDMA_MONITOR_DESC);
  3478. if (status != QDF_STATUS_SUCCESS) {
3479. dp_err("Failed to send htt srng setup message for Rxdma mon desc ring");
  3480. return status;
  3481. }
  3482. } else {
  3483. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3484. pdev->rxdma_mon_status_ring[mac_id]
  3485. .hal_srng,
  3486. RXDMA_MONITOR_STATUS);
  3487. if (status != QDF_STATUS_SUCCESS) {
  3488. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3489. return status;
  3490. }
  3491. }
  3492. return status;
  3493. }
  3494. #else
  3495. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3496. struct dp_pdev *pdev,
  3497. int mac_id,
  3498. int mac_for_pdev)
  3499. {
  3500. return QDF_STATUS_SUCCESS;
  3501. }
  3502. #endif
  3503. /*
  3504. * dp_rxdma_ring_config() - configure the RX DMA rings
  3505. *
  3506. * This function is used to configure the MAC rings.
3507. * On MCL, the host provides buffers in the Host2FW ring;
3508. * the FW refills (copies) buffers to the ring and updates
3509. * the ring_idx in a register.
  3510. *
  3511. * @soc: data path SoC handle
  3512. *
  3513. * Return: zero on success, non-zero on failure
  3514. */
  3515. #ifdef QCA_HOST2FW_RXBUF_RING
  3516. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3517. {
  3518. int i;
  3519. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3520. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3521. struct dp_pdev *pdev = soc->pdev_list[i];
  3522. if (pdev) {
  3523. int mac_id;
  3524. bool dbs_enable = 0;
  3525. int max_mac_rings =
  3526. wlan_cfg_get_num_mac_rings
  3527. (pdev->wlan_cfg_ctx);
  3528. htt_srng_setup(soc->htt_handle, 0,
  3529. pdev->rx_refill_buf_ring.hal_srng,
  3530. RXDMA_BUF);
  3531. if (pdev->rx_refill_buf_ring2.hal_srng)
  3532. htt_srng_setup(soc->htt_handle, 0,
  3533. pdev->rx_refill_buf_ring2.hal_srng,
  3534. RXDMA_BUF);
  3535. if (soc->cdp_soc.ol_ops->
  3536. is_hw_dbs_2x2_capable) {
  3537. dbs_enable = soc->cdp_soc.ol_ops->
  3538. is_hw_dbs_2x2_capable(soc->ctrl_psoc);
  3539. }
  3540. if (dbs_enable) {
  3541. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3542. QDF_TRACE_LEVEL_ERROR,
  3543. FL("DBS enabled max_mac_rings %d"),
  3544. max_mac_rings);
  3545. } else {
  3546. max_mac_rings = 1;
  3547. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3548. QDF_TRACE_LEVEL_ERROR,
  3549. FL("DBS disabled, max_mac_rings %d"),
  3550. max_mac_rings);
  3551. }
  3552. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3553. FL("pdev_id %d max_mac_rings %d"),
  3554. pdev->pdev_id, max_mac_rings);
  3555. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  3556. int mac_for_pdev = dp_get_mac_id_for_pdev(
  3557. mac_id, pdev->pdev_id);
  3558. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3559. QDF_TRACE_LEVEL_ERROR,
  3560. FL("mac_id %d"), mac_for_pdev);
  3561. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3562. pdev->rx_mac_buf_ring[mac_id]
  3563. .hal_srng,
  3564. RXDMA_BUF);
  3565. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3566. pdev->rxdma_err_dst_ring[mac_id]
  3567. .hal_srng,
  3568. RXDMA_DST);
  3569. /* Configure monitor mode rings */
  3570. status = dp_mon_htt_srng_setup(soc, pdev,
  3571. mac_id,
  3572. mac_for_pdev);
  3573. if (status != QDF_STATUS_SUCCESS) {
  3574. dp_err("Failed to send htt monitor messages to target");
  3575. return status;
  3576. }
  3577. }
  3578. }
  3579. }
  3580. /*
  3581. * Timer to reap rxdma status rings.
  3582. * Needed until we enable ppdu end interrupts
  3583. */
  3584. qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
  3585. dp_service_mon_rings, (void *)soc,
  3586. QDF_TIMER_TYPE_WAKE_APPS);
  3587. soc->reap_timer_init = 1;
  3588. return status;
  3589. }
  3590. #else
  3591. /* This is only for WIN */
  3592. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3593. {
  3594. int i;
  3595. int mac_id;
  3596. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3597. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3598. struct dp_pdev *pdev = soc->pdev_list[i];
  3599. if (pdev == NULL)
  3600. continue;
  3601. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3602. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
  3603. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3604. pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
  3605. #ifndef DISABLE_MON_CONFIG
  3606. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3607. pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
  3608. RXDMA_MONITOR_BUF);
  3609. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3610. pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
  3611. RXDMA_MONITOR_DST);
  3612. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3613. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  3614. RXDMA_MONITOR_STATUS);
  3615. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3616. pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
  3617. RXDMA_MONITOR_DESC);
  3618. #endif
  3619. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3620. pdev->rxdma_err_dst_ring[mac_id].hal_srng,
  3621. RXDMA_DST);
  3622. }
  3623. }
  3624. return status;
  3625. }
  3626. #endif
  3627. /*
  3628. * dp_soc_attach_target_wifi3() - SOC initialization in the target
  3629. * @cdp_soc: Opaque Datapath SOC handle
  3630. *
  3631. * Return: zero on success, non-zero on failure
  3632. */
  3633. static QDF_STATUS
  3634. dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
  3635. {
  3636. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  3637. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3638. htt_soc_attach_target(soc->htt_handle);
  3639. status = dp_rxdma_ring_config(soc);
  3640. if (status != QDF_STATUS_SUCCESS) {
  3641. dp_err("Failed to send htt srng setup messages to target");
  3642. return status;
  3643. }
  3644. DP_STATS_INIT(soc);
  3645. /* initialize work queue for stats processing */
  3646. qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
  3647. return QDF_STATUS_SUCCESS;
  3648. }
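/*
 * Editor's note: a minimal ordering sketch (illustrative only, not part of
 * the driver). dp_soc_attach_target_wifi3() assumes the host SRNGs already
 * exist, since dp_rxdma_ring_config() hands their hal_srng pointers to the
 * target over HTT. A hypothetical caller would therefore do:
 *
 *	// ... allocate pdevs and rings first ...
 *	if (dp_soc_attach_target_wifi3(cdp_soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;	// HTT srng setup failed
 */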
  3649. /*
  3650. * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
  3651. * @txrx_soc: Datapath SOC handle
  3652. */
  3653. static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
  3654. {
  3655. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  3656. return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
  3657. }
  3658. /*
  3659. * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3660. * @cdp_soc: Datapath SOC handle
3661. * @config: nss config value
  3662. */
  3663. static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
  3664. {
  3665. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  3666. struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
  3667. wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
  3668. /*
3669. * TODO: mask this out based on the per-radio offload configuration
  3670. */
  3671. switch (config) {
  3672. case dp_nss_cfg_default:
  3673. break;
  3674. case dp_nss_cfg_dbdc:
  3675. case dp_nss_cfg_dbtc:
  3676. wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
  3677. wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
  3678. wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
  3679. wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
  3680. break;
  3681. default:
  3682. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3683. "Invalid offload config %d", config);
  3684. }
  3685. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3686. FL("nss-wifi<0> nss config is enabled"));
  3687. }
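/*
 * Editor's note: hedged usage sketch (hypothetical caller, not in this
 * file). For a DBDC NSS-offload configuration, the control path would
 * typically set the value and read it back:
 *
 *	dp_soc_set_nss_cfg_wifi3(cdp_soc, dp_nss_cfg_dbdc);
 *	if (dp_soc_get_nss_cfg_wifi3(cdp_soc) != dp_nss_cfg_dbdc)
 *		dp_err("nss cfg was not applied");
 *
 * Note that dbdc/dbtc zero out the host Tx descriptor pools above, since
 * Tx is owned by the NSS offload in those modes.
 */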
  3688. /*
  3689. * dp_vdev_attach_wifi3() - attach txrx vdev
  3690. * @txrx_pdev: Datapath PDEV handle
  3691. * @vdev_mac_addr: MAC address of the virtual interface
  3692. * @vdev_id: VDEV Id
  3693. * @wlan_op_mode: VDEV operating mode
  3694. *
  3695. * Return: DP VDEV handle on success, NULL on failure
  3696. */
  3697. static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
  3698. uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
  3699. {
  3700. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3701. struct dp_soc *soc = pdev->soc;
  3702. struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
  3703. if (!vdev) {
  3704. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3705. FL("DP VDEV memory allocation failed"));
  3706. goto fail0;
  3707. }
  3708. vdev->pdev = pdev;
  3709. vdev->vdev_id = vdev_id;
  3710. vdev->opmode = op_mode;
  3711. vdev->osdev = soc->osdev;
  3712. vdev->osif_rx = NULL;
  3713. vdev->osif_rsim_rx_decap = NULL;
  3714. vdev->osif_get_key = NULL;
  3715. vdev->osif_rx_mon = NULL;
  3716. vdev->osif_tx_free_ext = NULL;
  3717. vdev->osif_vdev = NULL;
  3718. vdev->delete.pending = 0;
  3719. vdev->safemode = 0;
  3720. vdev->drop_unenc = 1;
  3721. vdev->sec_type = cdp_sec_type_none;
  3722. #ifdef notyet
  3723. vdev->filters_num = 0;
  3724. #endif
  3725. qdf_mem_copy(
  3726. &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
  3727. /* TODO: Initialize default HTT meta data that will be used in
  3728. * TCL descriptors for packets transmitted from this VDEV
  3729. */
  3730. TAILQ_INIT(&vdev->peer_list);
  3731. if ((soc->intr_mode == DP_INTR_POLL) &&
  3732. wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
  3733. if ((pdev->vdev_count == 0) ||
  3734. (wlan_op_mode_monitor == vdev->opmode))
  3735. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  3736. }
  3737. if (wlan_op_mode_monitor == vdev->opmode) {
  3738. pdev->monitor_vdev = vdev;
  3739. return (struct cdp_vdev *)vdev;
  3740. }
  3741. vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  3742. vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  3743. vdev->dscp_tid_map_id = 0;
  3744. vdev->mcast_enhancement_en = 0;
  3745. vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
  3746. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3747. /* add this vdev into the pdev's list */
  3748. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  3749. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3750. pdev->vdev_count++;
  3751. dp_tx_vdev_attach(vdev);
  3752. if (pdev->vdev_count == 1)
  3753. dp_lro_hash_setup(soc, pdev);
  3754. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3755. "Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
  3756. DP_STATS_INIT(vdev);
  3757. if (wlan_op_mode_sta == vdev->opmode)
  3758. dp_peer_create_wifi3((struct cdp_vdev *)vdev,
  3759. vdev->mac_addr.raw,
  3760. NULL);
  3761. return (struct cdp_vdev *)vdev;
  3762. fail0:
  3763. return NULL;
  3764. }
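/*
 * Editor's note: illustrative sketch, assuming the caller already holds a
 * valid cdp_pdev handle and a MAC address; names prefixed "example_" are
 * hypothetical placeholders.
 *
 *	struct cdp_vdev *example_vdev;
 *	uint8_t example_mac[OL_TXRX_MAC_ADDR_LEN] = { ... };
 *
 *	example_vdev = dp_vdev_attach_wifi3(txrx_pdev, example_mac,
 *					    vdev_id, wlan_op_mode_sta);
 *	if (!example_vdev)
 *		return QDF_STATUS_E_FAILURE;
 *
 * For wlan_op_mode_sta, the attach above also creates a self peer via
 * dp_peer_create_wifi3() using the vdev's own MAC address.
 */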
  3765. /**
  3766. * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
  3767. * @vdev: Datapath VDEV handle
  3768. * @osif_vdev: OSIF vdev handle
  3769. * @ctrl_vdev: UMAC vdev handle
  3770. * @txrx_ops: Tx and Rx operations
  3771. *
3772. * Return: void
  3773. */
  3774. static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
  3775. void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
  3776. struct ol_txrx_ops *txrx_ops)
  3777. {
  3778. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3779. vdev->osif_vdev = osif_vdev;
  3780. vdev->ctrl_vdev = ctrl_vdev;
  3781. vdev->osif_rx = txrx_ops->rx.rx;
  3782. vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
  3783. vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
  3784. vdev->osif_get_key = txrx_ops->get_key;
  3785. vdev->osif_rx_mon = txrx_ops->rx.mon;
  3786. vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
  3787. #ifdef notyet
  3788. #if ATH_SUPPORT_WAPI
  3789. vdev->osif_check_wai = txrx_ops->rx.wai_check;
  3790. #endif
  3791. #endif
  3792. #ifdef UMAC_SUPPORT_PROXY_ARP
  3793. vdev->osif_proxy_arp = txrx_ops->proxy_arp;
  3794. #endif
  3795. vdev->me_convert = txrx_ops->me_convert;
  3796. /* TODO: Enable the following once Tx code is integrated */
  3797. if (vdev->mesh_vdev)
  3798. txrx_ops->tx.tx = dp_tx_send_mesh;
  3799. else
  3800. txrx_ops->tx.tx = dp_tx_send;
  3801. txrx_ops->tx.tx_exception = dp_tx_send_exception;
  3802. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  3803. "DP Vdev Register success");
  3804. }
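/*
 * Editor's note: hedged sketch of how an OS shim typically fills
 * ol_txrx_ops before registering; the callback names are placeholders
 * (assumptions), not symbols defined in this driver.
 *
 *	struct ol_txrx_ops ops = { 0 };
 *
 *	ops.rx.rx = osif_example_rx_cb;
 *	ops.get_key = osif_example_get_key_cb;
 *	dp_vdev_register_wifi3(vdev_handle, osif_vdev, ctrl_vdev, &ops);
 *
 * On return, ops.tx.tx points at dp_tx_send (or dp_tx_send_mesh for a
 * mesh vdev), so the caller can use the same struct for its Tx fast path.
 */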
  3805. /**
3806. * dp_vdev_flush_peers() - Forcibly flush peers of vdev
  3807. * @vdev: Datapath VDEV handle
  3808. * @unmap_only: Flag to indicate "only unmap"
  3809. *
  3810. * Return: void
  3811. */
  3812. static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
  3813. {
  3814. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3815. struct dp_pdev *pdev = vdev->pdev;
  3816. struct dp_soc *soc = pdev->soc;
  3817. struct dp_peer *peer;
  3818. uint16_t *peer_ids;
  3819. uint8_t i = 0, j = 0;
  3820. peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
  3821. if (!peer_ids) {
  3822. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3823. "DP alloc failure - unable to flush peers");
  3824. return;
  3825. }
  3826. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3827. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3828. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  3829. if (peer->peer_ids[i] != HTT_INVALID_PEER)
  3830. if (j < soc->max_peers)
  3831. peer_ids[j++] = peer->peer_ids[i];
  3832. }
  3833. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3834. for (i = 0; i < j ; i++) {
  3835. if (unmap_only) {
  3836. peer = __dp_peer_find_by_id(soc, peer_ids[i]);
  3837. if (peer) {
  3838. dp_rx_peer_unmap_handler(soc, peer_ids[i],
  3839. vdev->vdev_id,
  3840. peer->mac_addr.raw,
  3841. 0);
  3842. }
  3843. } else {
  3844. peer = dp_peer_find_by_id(soc, peer_ids[i]);
  3845. if (peer) {
  3846. dp_info("peer: %pM is getting flush",
  3847. peer->mac_addr.raw);
  3848. dp_peer_delete_wifi3(peer, 0);
  3849. /*
3850. * We need to call dp_peer_unref_del_find_by_id()
3851. * to drop the additional reference taken by the
3852. * dp_peer_find_by_id() call above.
3853. *
3854. * The reference is held while dp_peer_delete_wifi3()
3855. * executes.
  3856. *
  3857. */
  3858. dp_peer_unref_del_find_by_id(peer);
  3859. dp_rx_peer_unmap_handler(soc, peer_ids[i],
  3860. vdev->vdev_id,
  3861. peer->mac_addr.raw, 0);
  3862. }
  3863. }
  3864. }
  3865. qdf_mem_free(peer_ids);
  3866. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3867. FL("Flushed peers for vdev object %pK "), vdev);
  3868. }
  3869. /*
  3870. * dp_vdev_detach_wifi3() - Detach txrx vdev
  3871. * @txrx_vdev: Datapath VDEV handle
  3872. * @callback: Callback OL_IF on completion of detach
  3873. * @cb_context: Callback context
  3874. *
  3875. */
  3876. static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
  3877. ol_txrx_vdev_delete_cb callback, void *cb_context)
  3878. {
  3879. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3880. struct dp_pdev *pdev = vdev->pdev;
  3881. struct dp_soc *soc = pdev->soc;
  3882. struct dp_neighbour_peer *peer = NULL;
  3883. struct dp_neighbour_peer *temp_peer = NULL;
  3884. /* preconditions */
  3885. qdf_assert(vdev);
  3886. if (wlan_op_mode_monitor == vdev->opmode)
  3887. goto free_vdev;
  3888. if (wlan_op_mode_sta == vdev->opmode)
  3889. dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
  3890. /*
3891. * If the target is hung, flush all peers before detaching the vdev;
3892. * this frees all references held due to missing
3893. * unmap commands from the target.
  3894. */
  3895. if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
  3896. dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
  3897. /*
  3898. * Use peer_ref_mutex while accessing peer_list, in case
  3899. * a peer is in the process of being removed from the list.
  3900. */
  3901. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3902. /* check that the vdev has no peers allocated */
  3903. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  3904. /* debug print - will be removed later */
  3905. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  3906. FL("not deleting vdev object %pK (%pM)"
  3907. "until deletion finishes for all its peers"),
  3908. vdev, vdev->mac_addr.raw);
  3909. /* indicate that the vdev needs to be deleted */
  3910. vdev->delete.pending = 1;
  3911. vdev->delete.callback = callback;
  3912. vdev->delete.context = cb_context;
  3913. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3914. return;
  3915. }
  3916. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3917. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  3918. if (!soc->hw_nac_monitor_support) {
  3919. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  3920. neighbour_peer_list_elem) {
  3921. QDF_ASSERT(peer->vdev != vdev);
  3922. }
  3923. } else {
  3924. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  3925. neighbour_peer_list_elem, temp_peer) {
  3926. if (peer->vdev == vdev) {
  3927. TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
  3928. neighbour_peer_list_elem);
  3929. qdf_mem_free(peer);
  3930. }
  3931. }
  3932. }
  3933. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  3934. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3935. dp_tx_vdev_detach(vdev);
  3936. /* remove the vdev from its parent pdev's list */
  3937. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  3938. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  3939. FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
  3940. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3941. free_vdev:
  3942. qdf_mem_free(vdev);
  3943. if (callback)
  3944. callback(cb_context);
  3945. }
  3946. /*
  3947. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  3948. * @soc - datapath soc handle
  3949. * @peer - datapath peer handle
  3950. *
  3951. * Delete the AST entries belonging to a peer
  3952. */
  3953. #ifdef FEATURE_AST
  3954. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  3955. struct dp_peer *peer)
  3956. {
  3957. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  3958. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  3959. dp_peer_del_ast(soc, ast_entry);
  3960. peer->self_ast_entry = NULL;
  3961. }
  3962. #else
  3963. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  3964. struct dp_peer *peer)
  3965. {
  3966. }
  3967. #endif
  3968. #if ATH_SUPPORT_WRAP
  3969. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  3970. uint8_t *peer_mac_addr)
  3971. {
  3972. struct dp_peer *peer;
  3973. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  3974. 0, vdev->vdev_id);
  3975. if (!peer)
  3976. return NULL;
  3977. if (peer->bss_peer)
  3978. return peer;
  3979. dp_peer_unref_delete(peer);
  3980. return NULL;
  3981. }
  3982. #else
  3983. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  3984. uint8_t *peer_mac_addr)
  3985. {
  3986. struct dp_peer *peer;
  3987. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  3988. 0, vdev->vdev_id);
  3989. if (!peer)
  3990. return NULL;
  3991. if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
  3992. return peer;
  3993. dp_peer_unref_delete(peer);
  3994. return NULL;
  3995. }
  3996. #endif
  3997. #ifdef FEATURE_AST
  3998. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  3999. struct dp_pdev *pdev,
  4000. uint8_t *peer_mac_addr)
  4001. {
  4002. struct dp_ast_entry *ast_entry;
  4003. qdf_spin_lock_bh(&soc->ast_lock);
  4004. if (soc->ast_override_support)
  4005. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
  4006. pdev->pdev_id);
  4007. else
  4008. ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
  4009. if (ast_entry && ast_entry->next_hop &&
  4010. !ast_entry->delete_in_progress)
  4011. dp_peer_del_ast(soc, ast_entry);
  4012. qdf_spin_unlock_bh(&soc->ast_lock);
  4013. }
  4014. #endif
  4015. /*
  4016. * dp_peer_create_wifi3() - attach txrx peer
4017. * @vdev_handle: Datapath VDEV handle
4018. * @peer_mac_addr: Peer MAC address
4019. * @ctrl_peer: Control-path (objmgr) peer handle
4020. * Return: DP peer handle on success, NULL on failure
  4021. */
  4022. static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
  4023. uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
  4024. {
  4025. struct dp_peer *peer;
  4026. int i;
  4027. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4028. struct dp_pdev *pdev;
  4029. struct dp_soc *soc;
  4030. enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
  4031. /* preconditions */
  4032. qdf_assert(vdev);
  4033. qdf_assert(peer_mac_addr);
  4034. pdev = vdev->pdev;
  4035. soc = pdev->soc;
  4036. /*
  4037. * If a peer entry with given MAC address already exists,
  4038. * reuse the peer and reset the state of peer.
  4039. */
  4040. peer = dp_peer_can_reuse(vdev, peer_mac_addr);
  4041. if (peer) {
  4042. qdf_atomic_init(&peer->is_default_route_set);
  4043. dp_peer_cleanup(vdev, peer);
  4044. qdf_spin_lock_bh(&soc->ast_lock);
  4045. dp_peer_delete_ast_entries(soc, peer);
  4046. peer->delete_in_progress = false;
  4047. qdf_spin_unlock_bh(&soc->ast_lock);
  4048. if ((vdev->opmode == wlan_op_mode_sta) &&
  4049. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  4050. DP_MAC_ADDR_LEN)) {
  4051. ast_type = CDP_TXRX_AST_TYPE_SELF;
  4052. }
  4053. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  4054. /*
4055. * The control path maintains a node count that is incremented
4056. * for every new peer create command. Since no new peer is being
4057. * created here and the earlier reference is reused,
4058. * a peer_unref_delete event is sent to the control path to
4059. * decrement the count back.
  4060. */
  4061. if (soc->cdp_soc.ol_ops->peer_unref_delete) {
  4062. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
  4063. peer->mac_addr.raw, vdev->mac_addr.raw,
  4064. vdev->opmode, peer->ctrl_peer, ctrl_peer);
  4065. }
  4066. peer->ctrl_peer = ctrl_peer;
  4067. dp_local_peer_id_alloc(pdev, peer);
  4068. DP_STATS_INIT(peer);
  4069. return (void *)peer;
  4070. } else {
  4071. /*
  4072. * When a STA roams from RPTR AP to ROOT AP and vice versa, we
  4073. * need to remove the AST entry which was earlier added as a WDS
  4074. * entry.
4075. * If an AST entry exists, but no peer entry exists with the given
4076. * MAC address, we can deduce that it is a WDS entry.
  4077. */
  4078. dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
  4079. }
  4080. #ifdef notyet
  4081. peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
  4082. soc->mempool_ol_ath_peer);
  4083. #else
  4084. peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
  4085. #endif
  4086. if (!peer)
  4087. return NULL; /* failure */
  4088. qdf_mem_zero(peer, sizeof(struct dp_peer));
  4089. TAILQ_INIT(&peer->ast_entry_list);
  4090. /* store provided params */
  4091. peer->vdev = vdev;
  4092. peer->ctrl_peer = ctrl_peer;
  4093. if ((vdev->opmode == wlan_op_mode_sta) &&
  4094. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  4095. DP_MAC_ADDR_LEN)) {
  4096. ast_type = CDP_TXRX_AST_TYPE_SELF;
  4097. }
  4098. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  4099. qdf_spinlock_create(&peer->peer_info_lock);
  4100. qdf_mem_copy(
  4101. &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4102. /* TODO: See if rx_opt_proc is really required */
  4103. peer->rx_opt_proc = soc->rx_opt_proc;
  4104. /* initialize the peer_id */
  4105. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  4106. peer->peer_ids[i] = HTT_INVALID_PEER;
  4107. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4108. qdf_atomic_init(&peer->ref_cnt);
  4109. /* keep one reference for attach */
  4110. qdf_atomic_inc(&peer->ref_cnt);
  4111. /* add this peer into the vdev's list */
  4112. if (wlan_op_mode_sta == vdev->opmode)
  4113. TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
  4114. else
  4115. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  4116. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4117. /* TODO: See if hash based search is required */
  4118. dp_peer_find_hash_add(soc, peer);
  4119. /* Initialize the peer state */
  4120. peer->state = OL_TXRX_PEER_STATE_DISC;
  4121. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4122. "vdev %pK created peer %pK (%pM) ref_cnt: %d",
  4123. vdev, peer, peer->mac_addr.raw,
  4124. qdf_atomic_read(&peer->ref_cnt));
  4125. /*
4126. * For every peer MAP message, check and set bss_peer
  4127. */
  4128. if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
  4129. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4130. "vdev bss_peer!!!!");
  4131. peer->bss_peer = 1;
  4132. vdev->vap_bss_peer = peer;
  4133. }
  4134. for (i = 0; i < DP_MAX_TIDS; i++)
  4135. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  4136. dp_local_peer_id_alloc(pdev, peer);
  4137. DP_STATS_INIT(peer);
  4138. return (void *)peer;
  4139. }
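/*
 * Editor's note: illustrative create-then-setup sequence (hypothetical
 * caller). dp_peer_create_wifi3() only allocates and links the peer;
 * dp_peer_setup_wifi3() is what programs the default route and REO queues.
 *
 *	void *example_peer;
 *
 *	example_peer = dp_peer_create_wifi3(vdev_handle, peer_mac,
 *					    ctrl_peer);
 *	if (!example_peer)
 *		return QDF_STATUS_E_NOMEM;
 *	dp_peer_setup_wifi3(vdev_handle, example_peer);
 */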
  4140. /*
  4141. * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
  4142. * @vdev: Datapath VDEV handle
  4143. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4144. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4145. *
  4146. * Return: None
  4147. */
  4148. static
  4149. void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
  4150. enum cdp_host_reo_dest_ring *reo_dest,
  4151. bool *hash_based)
  4152. {
  4153. struct dp_soc *soc;
  4154. struct dp_pdev *pdev;
  4155. pdev = vdev->pdev;
  4156. soc = pdev->soc;
  4157. /*
  4158. * hash based steering is disabled for Radios which are offloaded
  4159. * to NSS
  4160. */
  4161. if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
  4162. *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
  4163. /*
4164. * The line below ensures the proper reo_dest ring is chosen for
4165. * cases where the Toeplitz hash cannot be generated (e.g. non TCP/UDP)
  4166. */
  4167. *reo_dest = pdev->reo_dest;
  4168. }
  4169. #ifdef IPA_OFFLOAD
  4170. /*
  4171. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4172. * @vdev: Datapath VDEV handle
  4173. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4174. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4175. *
4176. * If IPA is enabled in the ini, disable hash-based steering for SAP
4177. * mode and use the default reo_dest ring for RX; use config values for other modes.
  4178. * Return: None
  4179. */
  4180. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4181. enum cdp_host_reo_dest_ring *reo_dest,
  4182. bool *hash_based)
  4183. {
  4184. struct dp_soc *soc;
  4185. struct dp_pdev *pdev;
  4186. pdev = vdev->pdev;
  4187. soc = pdev->soc;
  4188. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4189. /*
  4190. * If IPA is enabled, disable hash-based flow steering and set
  4191. * reo_dest_ring_4 as the REO ring to receive packets on.
  4192. * IPA is configured to reap reo_dest_ring_4.
  4193. *
4194. * Note - REO DST indexes range from 0 to 3, while the
4195. * cdp_host_reo_dest_ring enum values range from 1 to 4.
  4196. * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
  4197. */
  4198. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  4199. if (vdev->opmode == wlan_op_mode_ap) {
  4200. *reo_dest = IPA_REO_DEST_RING_IDX + 1;
  4201. *hash_based = 0;
  4202. }
  4203. }
  4204. }
  4205. #else
  4206. /*
  4207. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4208. * @vdev: Datapath VDEV handle
  4209. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4210. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4211. *
  4212. * Use system config values for hash based steering.
  4213. * Return: None
  4214. */
  4215. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4216. enum cdp_host_reo_dest_ring *reo_dest,
  4217. bool *hash_based)
  4218. {
  4219. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4220. }
  4221. #endif /* IPA_OFFLOAD */
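/*
 * Editor's note: worked example of the IPA index mapping described above,
 * under the assumption (implied by the comment, not verified here) that
 * IPA_REO_DEST_RING_IDX is 3: the HW REO destination index 3 then maps to
 * the enum value 3 + 1 == 4 (cdp_host_reo_dest_ring_4, assuming the enum
 * names follow cdp_host_reo_dest_ring_1..4), which is what gets written to
 * *reo_dest for SAP vdevs when IPA is enabled.
 */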
  4222. /*
  4223. * dp_peer_setup_wifi3() - initialize the peer
  4224. * @vdev_hdl: virtual device object
  4225. * @peer: Peer object
  4226. *
  4227. * Return: void
  4228. */
  4229. static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  4230. {
  4231. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  4232. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  4233. struct dp_pdev *pdev;
  4234. struct dp_soc *soc;
  4235. bool hash_based = 0;
  4236. enum cdp_host_reo_dest_ring reo_dest;
  4237. /* preconditions */
  4238. qdf_assert(vdev);
  4239. qdf_assert(peer);
  4240. pdev = vdev->pdev;
  4241. soc = pdev->soc;
  4242. peer->last_assoc_rcvd = 0;
  4243. peer->last_disassoc_rcvd = 0;
  4244. peer->last_deauth_rcvd = 0;
  4245. dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
  4246. dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
  4247. pdev->pdev_id, vdev->vdev_id,
  4248. vdev->opmode, hash_based, reo_dest);
  4249. /*
4250. * There are corner cases where AD1 == AD2 == the VAP's address,
4251. * i.e. both devices have the same MAC address. In these
4252. * cases we want such packets to be processed by the NULL-queue
4253. * handler, which is the REO2TCL ring. For this reason we should
4254. * not set up reo queues and the default route for the bss_peer.
  4255. */
  4256. if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
  4257. return;
  4258. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  4259. /* TODO: Check the destination ring number to be passed to FW */
  4260. soc->cdp_soc.ol_ops->peer_set_default_routing(
  4261. pdev->ctrl_pdev, peer->mac_addr.raw,
  4262. peer->vdev->vdev_id, hash_based, reo_dest);
  4263. }
  4264. qdf_atomic_set(&peer->is_default_route_set, 1);
  4265. dp_peer_rx_init(pdev, peer);
  4266. return;
  4267. }
  4268. /*
  4269. * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
  4270. * @vdev_handle: virtual device object
  4271. * @htt_pkt_type: type of pkt
  4272. *
  4273. * Return: void
  4274. */
  4275. static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
  4276. enum htt_cmn_pkt_type val)
  4277. {
  4278. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4279. vdev->tx_encap_type = val;
  4280. }
  4281. /*
  4282. * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
  4283. * @vdev_handle: virtual device object
  4284. * @htt_pkt_type: type of pkt
  4285. *
  4286. * Return: void
  4287. */
  4288. static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
  4289. enum htt_cmn_pkt_type val)
  4290. {
  4291. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4292. vdev->rx_decap_type = val;
  4293. }
  4294. /*
  4295. * dp_set_ba_aging_timeout() - set ba aging timeout per AC
  4296. * @txrx_soc: cdp soc handle
  4297. * @ac: Access category
  4298. * @value: timeout value in millisec
  4299. *
  4300. * Return: void
  4301. */
  4302. static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  4303. uint8_t ac, uint32_t value)
  4304. {
  4305. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  4306. hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
  4307. }
  4308. /*
  4309. * dp_get_ba_aging_timeout() - get ba aging timeout per AC
  4310. * @txrx_soc: cdp soc handle
  4311. * @ac: access category
  4312. * @value: timeout value in millisec
  4313. *
  4314. * Return: void
  4315. */
  4316. static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  4317. uint8_t ac, uint32_t *value)
  4318. {
  4319. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  4320. hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
  4321. }
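/*
 * Editor's note: hedged usage sketch. The set/get pair above operates per
 * access category; e.g. to double the BA aging timeout for one AC (the
 * variable "ac" is an access-category index, an illustrative assumption):
 *
 *	uint32_t timeout_ms;
 *
 *	dp_get_ba_aging_timeout(txrx_soc, ac, &timeout_ms);
 *	dp_set_ba_aging_timeout(txrx_soc, ac, timeout_ms * 2);
 */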
  4322. /*
  4323. * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
  4324. * @pdev_handle: physical device object
  4325. * @val: reo destination ring index (1 - 4)
  4326. *
  4327. * Return: void
  4328. */
  4329. static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
  4330. enum cdp_host_reo_dest_ring val)
  4331. {
  4332. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4333. if (pdev)
  4334. pdev->reo_dest = val;
  4335. }
  4336. /*
  4337. * dp_get_pdev_reo_dest() - get the reo destination for this pdev
  4338. * @pdev_handle: physical device object
  4339. *
  4340. * Return: reo destination ring index
  4341. */
  4342. static enum cdp_host_reo_dest_ring
  4343. dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
  4344. {
  4345. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4346. if (pdev)
  4347. return pdev->reo_dest;
  4348. else
  4349. return cdp_host_reo_dest_ring_unknown;
  4350. }
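/*
 * Editor's note: illustrative use of the pdev reo_dest accessors above;
 * valid values are the cdp_host_reo_dest_ring enum (1 - 4). The enum name
 * cdp_host_reo_dest_ring_2 is assumed to follow the _1.._4 pattern.
 *
 *	if (dp_get_pdev_reo_dest(pdev_handle) != cdp_host_reo_dest_ring_2)
 *		dp_set_pdev_reo_dest(pdev_handle, cdp_host_reo_dest_ring_2);
 */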
  4351. /*
  4352. * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
  4353. * @pdev_handle: device object
  4354. * @val: value to be set
  4355. *
4356. * Return: 0 on success
  4357. */
  4358. static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  4359. uint32_t val)
  4360. {
  4361. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4362. /* Enable/Disable smart mesh filtering. This flag will be checked
  4363. * during rx processing to check if packets are from NAC clients.
  4364. */
  4365. pdev->filter_neighbour_peers = val;
  4366. return 0;
  4367. }
  4368. /*
4369. * dp_update_filter_neighbour_peers() - set neighbour peer (NAC client)
4370. * address for smart mesh filtering
4371. * @vdev_handle: virtual device object
4372. * @cmd: Add/Del command
4373. * @macaddr: NAC client MAC address
4374. *
4375. * Return: 1 on success, 0 on failure
  4376. */
  4377. static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
  4378. uint32_t cmd, uint8_t *macaddr)
  4379. {
  4380. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4381. struct dp_pdev *pdev = vdev->pdev;
  4382. struct dp_neighbour_peer *peer = NULL;
  4383. if (!macaddr)
  4384. goto fail0;
  4385. /* Store address of NAC (neighbour peer) which will be checked
  4386. * against TA of received packets.
  4387. */
  4388. if (cmd == DP_NAC_PARAM_ADD) {
  4389. peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
  4390. sizeof(*peer));
  4391. if (!peer) {
  4392. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4393. FL("DP neighbour peer node memory allocation failed"));
  4394. goto fail0;
  4395. }
  4396. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  4397. macaddr, DP_MAC_ADDR_LEN);
  4398. peer->vdev = vdev;
  4399. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4400. /* add this neighbour peer into the list */
  4401. TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
  4402. neighbour_peer_list_elem);
  4403. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4404. /* first neighbour */
  4405. if (!pdev->neighbour_peers_added) {
  4406. pdev->neighbour_peers_added = true;
  4407. dp_ppdu_ring_cfg(pdev);
  4408. }
  4409. return 1;
  4410. } else if (cmd == DP_NAC_PARAM_DEL) {
  4411. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4412. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  4413. neighbour_peer_list_elem) {
  4414. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  4415. macaddr, DP_MAC_ADDR_LEN)) {
  4416. /* delete this peer from the list */
  4417. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  4418. peer, neighbour_peer_list_elem);
  4419. qdf_mem_free(peer);
  4420. break;
  4421. }
  4422. }
  4423. /* last neighbour deleted */
  4424. if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
  4425. pdev->neighbour_peers_added = false;
  4426. dp_ppdu_ring_cfg(pdev);
  4427. }
  4428. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4429. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  4430. !pdev->enhanced_stats_en)
  4431. dp_ppdu_ring_reset(pdev);
  4432. return 1;
  4433. }
  4434. fail0:
  4435. return 0;
  4436. }
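/*
 * Editor's note: hedged sketch of smart-mesh NAC filtering setup
 * (hypothetical caller; the MAC address below is a placeholder). The
 * pdev-level filter flag and the per-client list work together:
 *
 *	uint8_t nac_mac[DP_MAC_ADDR_LEN] = { ... };
 *
 *	dp_set_filter_neighbour_peers(pdev_handle, 1);
 *	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
 *					 nac_mac);
 *	// ... capture traffic from the NAC client ...
 *	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_DEL,
 *					 nac_mac);
 */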
  4437. /*
  4438. * dp_get_sec_type() - Get the security type
  4439. * @peer: Datapath peer handle
  4440. * @sec_idx: Security id (mcast, ucast)
  4441. *
4442. * Return: sec_type (security type of the peer)
  4443. */
  4444. static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
  4445. {
  4446. struct dp_peer *dpeer = (struct dp_peer *)peer;
  4447. return dpeer->security[sec_idx].sec_type;
  4448. }
  4449. /*
  4450. * dp_peer_authorize() - authorize txrx peer
  4451. * @peer_handle: Datapath peer handle
4452. * @authorize: authorization flag (non-zero to authorize the peer)
  4453. *
  4454. */
  4455. static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
  4456. {
  4457. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4458. struct dp_soc *soc;
  4459. if (peer != NULL) {
  4460. soc = peer->vdev->pdev->soc;
  4461. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4462. peer->authorize = authorize ? 1 : 0;
  4463. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4464. }
  4465. }
  4466. static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
  4467. struct dp_pdev *pdev,
  4468. struct dp_peer *peer,
  4469. uint32_t vdev_id)
  4470. {
  4471. struct dp_vdev *vdev = NULL;
  4472. struct dp_peer *bss_peer = NULL;
  4473. uint8_t *m_addr = NULL;
  4474. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4475. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  4476. if (vdev->vdev_id == vdev_id)
  4477. break;
  4478. }
  4479. if (!vdev) {
  4480. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4481. "vdev is NULL");
  4482. } else {
  4483. if (vdev->vap_bss_peer == peer)
  4484. vdev->vap_bss_peer = NULL;
  4485. m_addr = peer->mac_addr.raw;
  4486. if (soc->cdp_soc.ol_ops->peer_unref_delete)
  4487. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
  4488. m_addr, vdev->mac_addr.raw, vdev->opmode,
  4489. peer->ctrl_peer, NULL);
  4490. if (vdev && vdev->vap_bss_peer) {
  4491. bss_peer = vdev->vap_bss_peer;
  4492. DP_UPDATE_STATS(vdev, peer);
  4493. }
  4494. }
  4495. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4496. /*
4497. * Peer AST list has to be empty here
  4498. */
  4499. DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
  4500. qdf_mem_free(peer);
  4501. }
  4502. /**
  4503. * dp_delete_pending_vdev() - check and process vdev delete
  4504. * @pdev: DP specific pdev pointer
  4505. * @vdev: DP specific vdev pointer
  4506. * @vdev_id: vdev id corresponding to vdev
  4507. *
  4508. * This API does following:
  4509. * 1) It releases tx flow pools buffers as vdev is
  4510. * going down and no peers are associated.
  4511. * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
  4512. */
  4513. static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
  4514. uint8_t vdev_id)
  4515. {
  4516. ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
  4517. void *vdev_delete_context = NULL;
  4518. vdev_delete_cb = vdev->delete.callback;
  4519. vdev_delete_context = vdev->delete.context;
  4520. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4521. FL("deleting vdev object %pK (%pM)- its last peer is done"),
  4522. vdev, vdev->mac_addr.raw);
  4523. /* all peers are gone, go ahead and delete it */
  4524. dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
  4525. FLOW_TYPE_VDEV, vdev_id);
  4526. dp_tx_vdev_detach(vdev);
  4527. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4528. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  4529. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4530. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4531. FL("deleting vdev object %pK (%pM)"),
  4532. vdev, vdev->mac_addr.raw);
  4533. qdf_mem_free(vdev);
  4534. vdev = NULL;
  4535. if (vdev_delete_cb)
  4536. vdev_delete_cb(vdev_delete_context);
  4537. }
  4538. /*
  4539. * dp_peer_unref_delete() - unref and delete peer
  4540. * @peer_handle: Datapath peer handle
  4541. *
  4542. */
  4543. void dp_peer_unref_delete(void *peer_handle)
  4544. {
  4545. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4546. struct dp_vdev *vdev = peer->vdev;
  4547. struct dp_pdev *pdev = vdev->pdev;
  4548. struct dp_soc *soc = pdev->soc;
  4549. struct dp_peer *tmppeer;
  4550. int found = 0;
  4551. uint16_t peer_id;
  4552. uint16_t vdev_id;
  4553. bool delete_vdev;
  4554. /*
  4555. * Hold the lock all the way from checking if the peer ref count
  4556. * is zero until the peer references are removed from the hash
  4557. * table and vdev list (if the peer ref count is zero).
  4558. * This protects against a new HL tx operation starting to use the
  4559. * peer object just after this function concludes it's done being used.
  4560. * Furthermore, the lock needs to be held while checking whether the
  4561. * vdev's list of peers is empty, to make sure that list is not modified
  4562. * concurrently with the empty check.
  4563. */
  4564. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4565. if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
  4566. peer_id = peer->peer_ids[0];
  4567. vdev_id = vdev->vdev_id;
  4568. /*
  4569. * Make sure that the reference to the peer in
  4570. * peer object map is removed
  4571. */
  4572. if (peer_id != HTT_INVALID_PEER)
  4573. soc->peer_id_to_obj_map[peer_id] = NULL;
  4574. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4575. "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
  4576. /* remove the reference to the peer from the hash table */
  4577. dp_peer_find_hash_remove(soc, peer);
  4578. qdf_spin_lock_bh(&soc->ast_lock);
  4579. if (peer->self_ast_entry) {
  4580. dp_peer_del_ast(soc, peer->self_ast_entry);
  4581. peer->self_ast_entry = NULL;
  4582. }
  4583. qdf_spin_unlock_bh(&soc->ast_lock);
  4584. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  4585. if (tmppeer == peer) {
  4586. found = 1;
  4587. break;
  4588. }
  4589. }
  4590. if (found) {
  4591. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  4592. peer_list_elem);
  4593. } else {
  4594. /*Ignoring the remove operation as peer not found*/
  4595. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4596. "peer:%pK not found in vdev:%pK peerlist:%pK",
  4597. peer, vdev, &peer->vdev->peer_list);
  4598. }
  4599. /* cleanup the peer data */
  4600. dp_peer_cleanup(vdev, peer);
  4601. /* check whether the parent vdev has no peers left */
  4602. if (TAILQ_EMPTY(&vdev->peer_list)) {
  4603. /*
  4604. * capture vdev delete pending flag's status
  4605. * while holding peer_ref_mutex lock
  4606. */
  4607. delete_vdev = vdev->delete.pending;
  4608. /*
  4609. * Now that there are no references to the peer, we can
  4610. * release the peer reference lock.
  4611. */
  4612. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4613. /*
  4614. * Check if the parent vdev was waiting for its peers
  4615. * to be deleted, in order for it to be deleted too.
  4616. */
  4617. if (delete_vdev)
  4618. dp_delete_pending_vdev(pdev, vdev, vdev_id);
  4619. } else {
  4620. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4621. }
  4622. dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
  4623. } else {
  4624. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4625. }
  4626. }
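/*
 * Editor's note: hedged reminder of the reference pattern used above.
 * dp_peer_find_by_id() takes a reference that the caller must drop with
 * dp_peer_unref_del_find_by_id(); dropping the last reference funnels into
 * dp_peer_unref_delete(), which unlinks the peer and may complete a
 * pending vdev delete. Sketch (hypothetical caller):
 *
 *	peer = dp_peer_find_by_id(soc, peer_id);
 *	if (peer) {
 *		// ... use peer ...
 *		dp_peer_unref_del_find_by_id(peer);
 *	}
 */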
  4627. /*
4628. * dp_peer_delete_wifi3() - Delete (detach) txrx peer
  4629. * @peer_handle: Datapath peer handle
  4630. * @bitmap: bitmap indicating special handling of request.
  4631. *
  4632. */
  4633. static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
  4634. {
  4635. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4636. /* redirect the peer's rx delivery function to point to a
  4637. * discard func
  4638. */
  4639. peer->rx_opt_proc = dp_rx_discard;
4640. /* Do not set ctrl_peer to NULL for connected sta peers.
  4641. * We need ctrl_peer to release the reference during dp
  4642. * peer free. This reference was held for
  4643. * obj_mgr peer during the creation of dp peer.
  4644. */
  4645. if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
  4646. !peer->bss_peer))
  4647. peer->ctrl_peer = NULL;
  4648. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  4649. FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);
  4650. dp_local_peer_id_free(peer->vdev->pdev, peer);
  4651. qdf_spinlock_destroy(&peer->peer_info_lock);
  4652. /*
  4653. * Remove the reference added during peer_attach.
  4654. * The peer will still be left allocated until the
  4655. * PEER_UNMAP message arrives to remove the other
  4656. * reference, added by the PEER_MAP message.
  4657. */
  4658. dp_peer_unref_delete(peer_handle);
  4659. }
  4660. /*
4661. * dp_get_vdev_mac_addr_wifi3() - get the MAC address of a vdev
4662. * @pvdev: Datapath VDEV handle
4663. * Return: pointer to the vdev MAC address
  4664. */
  4665. static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
  4666. {
  4667. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  4668. return vdev->mac_addr.raw;
  4669. }
  4670. /*
4671. * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
4672. * @vdev_handle: DP VDEV handle
4673. * @val: WDS enable/disable value
4674. *
4675. * Return: 0 on success
  4676. */
  4677. static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
  4678. {
  4679. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4680. vdev->wds_enabled = val;
  4681. return 0;
  4682. }
  4683. /*
4684. * dp_get_vdev_from_vdev_id_wifi3() - get vdev handle from vdev_id
4685. * @dev: Datapath PDEV handle
4686. * @vdev_id: vdev id
  4687. */
  4688. static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
  4689. uint8_t vdev_id)
  4690. {
  4691. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  4692. struct dp_vdev *vdev = NULL;
  4693. if (qdf_unlikely(!pdev))
  4694. return NULL;
  4695. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4696. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  4697. if (vdev->vdev_id == vdev_id)
  4698. break;
  4699. }
  4700. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4701. return (struct cdp_vdev *)vdev;
  4702. }
  4703. /*
  4704. * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
  4705. * @dev: PDEV handle
  4706. *
  4707. * Return: VDEV handle of monitor mode
  4708. */
  4709. static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
  4710. {
  4711. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  4712. if (qdf_unlikely(!pdev))
  4713. return NULL;
  4714. return (struct cdp_vdev *)pdev->monitor_vdev;
  4715. }
  4716. static int dp_get_opmode(struct cdp_vdev *vdev_handle)
  4717. {
  4718. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4719. return vdev->opmode;
  4720. }
  4721. static
  4722. void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
  4723. ol_txrx_rx_fp *stack_fn_p,
  4724. ol_osif_vdev_handle *osif_vdev_p)
  4725. {
  4726. struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
  4727. qdf_assert(vdev);
  4728. *stack_fn_p = vdev->osif_rx_stack;
  4729. *osif_vdev_p = vdev->osif_vdev;
  4730. }
  4731. static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
  4732. {
  4733. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  4734. struct dp_pdev *pdev = vdev->pdev;
  4735. return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
  4736. }
  4737. /**
  4738. * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
  4739. * ring based on target
  4740. * @soc: soc handle
  4741. * @mac_for_pdev: pdev_id
  4742. * @pdev: physical device handle
  4743. * @ring_num: mac id
  4744. * @htt_tlv_filter: tlv filter
  4745. *
  4746. * Return: zero on success, non-zero on failure
  4747. */
  4748. static inline
  4749. QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
  4750. struct dp_pdev *pdev, uint8_t ring_num,
  4751. struct htt_rx_ring_tlv_filter htt_tlv_filter)
  4752. {
  4753. QDF_STATUS status;
  4754. if (soc->wlan_cfg_ctx->rxdma1_enable)
  4755. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4756. pdev->rxdma_mon_buf_ring[ring_num]
  4757. .hal_srng,
  4758. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
  4759. &htt_tlv_filter);
  4760. else
  4761. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4762. pdev->rx_mac_buf_ring[ring_num]
  4763. .hal_srng,
  4764. RXDMA_BUF, RX_BUFFER_SIZE,
  4765. &htt_tlv_filter);
  4766. return status;
  4767. }
  4768. /**
  4769. * dp_reset_monitor_mode() - Disable monitor mode
  4770. * @pdev_handle: Datapath PDEV handle
  4771. *
  4772. * Return: 0 on success, not 0 on failure
  4773. */
  4774. static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
  4775. {
  4776. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4777. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4778. struct dp_soc *soc = pdev->soc;
  4779. uint8_t pdev_id;
  4780. int mac_id;
  4781. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4782. pdev_id = pdev->pdev_id;
  4783. soc = pdev->soc;
  4784. qdf_spin_lock_bh(&pdev->mon_lock);
  4785. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  4786. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4787. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4788. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4789. pdev, mac_id,
  4790. htt_tlv_filter);
  4791. if (status != QDF_STATUS_SUCCESS) {
  4792. dp_err("Failed to send tlv filter for monitor mode rings");
  4793. return status;
  4794. }
  4795. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4796. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4797. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
  4798. &htt_tlv_filter);
  4799. }
  4800. pdev->monitor_vdev = NULL;
  4801. pdev->mcopy_mode = 0;
  4802. pdev->monitor_configured = false;
  4803. qdf_spin_unlock_bh(&pdev->mon_lock);
  4804. return QDF_STATUS_SUCCESS;
  4805. }
  4806. /**
  4807. * dp_set_nac() - set peer_nac
  4808. * @peer_handle: Datapath PEER handle
  4809. *
  4810. * Return: void
  4811. */
  4812. static void dp_set_nac(struct cdp_peer *peer_handle)
  4813. {
  4814. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4815. peer->nac = 1;
  4816. }
  4817. /**
  4818. * dp_get_tx_pending() - read pending tx
  4819. * @pdev_handle: Datapath PDEV handle
  4820. *
  4821. * Return: outstanding tx
  4822. */
  4823. static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
  4824. {
  4825. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4826. return qdf_atomic_read(&pdev->num_tx_outstanding);
  4827. }
  4828. /**
  4829. * dp_get_peer_mac_from_peer_id() - get peer mac
  4830. * @pdev_handle: Datapath PDEV handle
  4831. * @peer_id: Peer ID
  4832. * @peer_mac: MAC addr of PEER
  4833. *
  4834. * Return: void
  4835. */
  4836. static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
  4837. uint32_t peer_id, uint8_t *peer_mac)
  4838. {
  4839. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4840. struct dp_peer *peer;
  4841. if (pdev && peer_mac) {
  4842. peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
  4843. if (peer) {
  4844. qdf_mem_copy(peer_mac, peer->mac_addr.raw,
  4845. DP_MAC_ADDR_LEN);
  4846. dp_peer_unref_del_find_by_id(peer);
  4847. }
  4848. }
  4849. }
  4850. /**
  4851. * dp_pdev_configure_monitor_rings() - configure monitor rings
4852. * @pdev: Datapath PDEV handle
4853. *
4854. * Return: QDF_STATUS_SUCCESS on success, error status on failure
  4855. */
  4856. static QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
  4857. {
  4858. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4859. struct dp_soc *soc;
  4860. uint8_t pdev_id;
  4861. int mac_id;
  4862. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4863. pdev_id = pdev->pdev_id;
  4864. soc = pdev->soc;
  4865. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  4866. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  4867. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  4868. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  4869. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  4870. pdev->mo_data_filter);
  4871. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  4872. htt_tlv_filter.mpdu_start = 1;
  4873. htt_tlv_filter.msdu_start = 1;
  4874. htt_tlv_filter.packet = 1;
  4875. htt_tlv_filter.msdu_end = 1;
  4876. htt_tlv_filter.mpdu_end = 1;
  4877. htt_tlv_filter.packet_header = 1;
  4878. htt_tlv_filter.attention = 1;
  4879. htt_tlv_filter.ppdu_start = 0;
  4880. htt_tlv_filter.ppdu_end = 0;
  4881. htt_tlv_filter.ppdu_end_user_stats = 0;
  4882. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  4883. htt_tlv_filter.ppdu_end_status_done = 0;
  4884. htt_tlv_filter.header_per_msdu = 1;
  4885. htt_tlv_filter.enable_fp =
  4886. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  4887. htt_tlv_filter.enable_md = 0;
  4888. htt_tlv_filter.enable_mo =
  4889. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  4890. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  4891. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  4892. if (pdev->mcopy_mode)
  4893. htt_tlv_filter.fp_data_filter = 0;
  4894. else
  4895. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  4896. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  4897. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  4898. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  4899. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4900. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  4901. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  4902. pdev, mac_id,
  4903. htt_tlv_filter);
  4904. if (status != QDF_STATUS_SUCCESS) {
  4905. dp_err("Failed to send tlv filter for monitor mode rings");
  4906. return status;
  4907. }
  4908. }
  4909. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  4910. htt_tlv_filter.mpdu_start = 1;
  4911. htt_tlv_filter.msdu_start = 0;
  4912. htt_tlv_filter.packet = 0;
  4913. htt_tlv_filter.msdu_end = 0;
  4914. htt_tlv_filter.mpdu_end = 0;
  4915. htt_tlv_filter.attention = 0;
  4916. htt_tlv_filter.ppdu_start = 1;
  4917. htt_tlv_filter.ppdu_end = 1;
  4918. htt_tlv_filter.ppdu_end_user_stats = 1;
  4919. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  4920. htt_tlv_filter.ppdu_end_status_done = 1;
  4921. htt_tlv_filter.enable_fp = 1;
  4922. htt_tlv_filter.enable_md = 0;
  4923. htt_tlv_filter.enable_mo = 1;
  4924. if (pdev->mcopy_mode) {
  4925. htt_tlv_filter.packet_header = 1;
  4926. }
  4927. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  4928. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  4929. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  4930. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  4931. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  4932. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  4933. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4934. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  4935. pdev->pdev_id);
  4936. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4937. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4938. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  4939. }
  4940. return status;
  4941. }
  4942. /**
  4943. * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
  4944. * @vdev_handle: Datapath VDEV handle
4945. * @smart_monitor: Flag to denote if it is smart monitor mode
  4946. *
  4947. * Return: 0 on success, not 0 on failure
  4948. */
  4949. static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
  4950. uint8_t smart_monitor)
  4951. {
  4952. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4953. struct dp_pdev *pdev;
  4954. qdf_assert(vdev);
  4955. pdev = vdev->pdev;
  4956. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  4957. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
  4958. pdev, pdev->pdev_id, pdev->soc, vdev);
  4959. /*Check if current pdev's monitor_vdev exists */
  4960. if (pdev->monitor_configured) {
  4961. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  4962. "monitor vap already created vdev=%pK\n", vdev);
  4963. qdf_assert(vdev);
  4964. return QDF_STATUS_E_RESOURCES;
  4965. }
  4966. pdev->monitor_vdev = vdev;
  4967. pdev->monitor_configured = true;
  4968. /* If smart monitor mode, do not configure monitor ring */
  4969. if (smart_monitor)
  4970. return QDF_STATUS_SUCCESS;
  4971. return dp_pdev_configure_monitor_rings(pdev);
  4972. }
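/*
 * Editor's note: illustrative monitor-mode bring-up/tear-down order
 * (hypothetical caller). A normal monitor vdev configures the rings
 * immediately; a smart monitor defers ring configuration to the later
 * filter call (dp_pdev_set_advance_monitor_filter()).
 *
 *	if (dp_vdev_set_monitor_mode(vdev_handle, 0) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_RESOURCES;
 *	// ... capture ...
 *	dp_reset_monitor_mode(pdev_handle);
 */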
  4973. /**
  4974. * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
  4975. * @pdev_handle: Datapath PDEV handle
  4976. * @filter_val: Flag to select Filter for monitor mode
  4977. * Return: 0 on success, not 0 on failure
  4978. */
  4979. static QDF_STATUS
  4980. dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
  4981. struct cdp_monitor_filter *filter_val)
  4982. {
4983. /* Many monitor VAPs can exist in a system, but only one can be up at
4984. * any time
  4985. */
  4986. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4987. struct dp_vdev *vdev = pdev->monitor_vdev;
  4988. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4989. struct dp_soc *soc;
  4990. uint8_t pdev_id;
  4991. int mac_id;
  4992. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4993. pdev_id = pdev->pdev_id;
  4994. soc = pdev->soc;
  4995. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  4996. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
  4997. pdev, pdev_id, soc, vdev);
  4998. /*Check if current pdev's monitor_vdev exists */
  4999. if (!pdev->monitor_vdev) {
  5000. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  5001. "vdev=%pK", vdev);
  5002. qdf_assert(vdev);
  5003. }
  5004. /* update filter mode, type in pdev structure */
  5005. pdev->mon_filter_mode = filter_val->mode;
  5006. pdev->fp_mgmt_filter = filter_val->fp_mgmt;
  5007. pdev->fp_ctrl_filter = filter_val->fp_ctrl;
  5008. pdev->fp_data_filter = filter_val->fp_data;
  5009. pdev->mo_mgmt_filter = filter_val->mo_mgmt;
  5010. pdev->mo_ctrl_filter = filter_val->mo_ctrl;
  5011. pdev->mo_data_filter = filter_val->mo_data;
  5012. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  5013. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  5014. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  5015. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  5016. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  5017. pdev->mo_data_filter);
  5018. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  5019. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5020. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  5021. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  5022. pdev, mac_id,
  5023. htt_tlv_filter);
  5024. if (status != QDF_STATUS_SUCCESS) {
  5025. dp_err("Failed to send tlv filter for monitor mode rings");
  5026. return status;
  5027. }
  5028. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5029. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  5030. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  5031. }
  5032. htt_tlv_filter.mpdu_start = 1;
  5033. htt_tlv_filter.msdu_start = 1;
  5034. htt_tlv_filter.packet = 1;
  5035. htt_tlv_filter.msdu_end = 1;
  5036. htt_tlv_filter.mpdu_end = 1;
  5037. htt_tlv_filter.packet_header = 1;
  5038. htt_tlv_filter.attention = 1;
  5039. htt_tlv_filter.ppdu_start = 0;
  5040. htt_tlv_filter.ppdu_end = 0;
  5041. htt_tlv_filter.ppdu_end_user_stats = 0;
  5042. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  5043. htt_tlv_filter.ppdu_end_status_done = 0;
  5044. htt_tlv_filter.header_per_msdu = 1;
  5045. htt_tlv_filter.enable_fp =
  5046. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  5047. htt_tlv_filter.enable_md = 0;
  5048. htt_tlv_filter.enable_mo =
  5049. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  5050. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  5051. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  5052. if (pdev->mcopy_mode)
  5053. htt_tlv_filter.fp_data_filter = 0;
  5054. else
  5055. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  5056. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  5057. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  5058. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  5059. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5060. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  5061. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  5062. pdev, mac_id,
  5063. htt_tlv_filter);
  5064. if (status != QDF_STATUS_SUCCESS) {
  5065. dp_err("Failed to send tlv filter for monitor mode rings");
  5066. return status;
  5067. }
  5068. }
  5069. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  5070. htt_tlv_filter.mpdu_start = 1;
  5071. htt_tlv_filter.msdu_start = 0;
  5072. htt_tlv_filter.packet = 0;
  5073. htt_tlv_filter.msdu_end = 0;
  5074. htt_tlv_filter.mpdu_end = 0;
  5075. htt_tlv_filter.attention = 0;
  5076. htt_tlv_filter.ppdu_start = 1;
  5077. htt_tlv_filter.ppdu_end = 1;
  5078. htt_tlv_filter.ppdu_end_user_stats = 1;
  5079. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  5080. htt_tlv_filter.ppdu_end_status_done = 1;
  5081. htt_tlv_filter.enable_fp = 1;
  5082. htt_tlv_filter.enable_md = 0;
  5083. htt_tlv_filter.enable_mo = 1;
  5084. if (pdev->mcopy_mode) {
  5085. htt_tlv_filter.packet_header = 1;
  5086. }
  5087. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  5088. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  5089. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  5090. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  5091. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  5092. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  5093. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5094. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  5095. pdev->pdev_id);
  5096. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5097. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  5098. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  5099. }
  5100. return QDF_STATUS_SUCCESS;
  5101. }
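/*
 * Illustrative sketch (not part of the driver): how a caller might populate
 * struct cdp_monitor_filter before dp_pdev_set_advance_monitor_filter() runs.
 * Only the fields consumed above are shown; how the request reaches this
 * static handler (typically through the registered cdp ops table) is an
 * assumption and may differ per integration.
 *
 *	struct cdp_monitor_filter filter = {0};
 *
 *	filter.mode = MON_FILTER_PASS;		// enable filter-pass path only
 *	filter.fp_mgmt = FILTER_MGMT_ALL;	// all management frames
 *	filter.fp_ctrl = FILTER_CTRL_ALL;	// all control frames
 *	filter.fp_data = FILTER_DATA_UCAST;	// unicast data only
 *	filter.mo_mgmt = 0;			// monitor-other path disabled
 *	filter.mo_ctrl = 0;
 *	filter.mo_data = 0;
 *
 *	dp_pdev_set_advance_monitor_filter(pdev_handle, &filter);
 */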
  5102. /**
  5103. * dp_get_pdev_id_frm_pdev() - get pdev_id
  5104. * @pdev_handle: Datapath PDEV handle
  5105. *
  5106. * Return: pdev_id
  5107. */
  5108. static
  5109. uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
  5110. {
  5111. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5112. return pdev->pdev_id;
  5113. }
  5114. /**
  5115. * dp_pdev_set_chan_noise_floor() - set channel noise floor
  5116. * @pdev_handle: Datapath PDEV handle
  5117. * @chan_noise_floor: Channel Noise Floor
  5118. *
  5119. * Return: void
  5120. */
  5121. static
  5122. void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
  5123. int16_t chan_noise_floor)
  5124. {
  5125. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5126. pdev->chan_noise_floor = chan_noise_floor;
  5127. }
  5128. /**
  5129. * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
  5130. * @vdev_handle: Datapath VDEV handle
  5131. * Return: true on ucast filter flag set
  5132. */
  5133. static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
  5134. {
  5135. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5136. struct dp_pdev *pdev;
  5137. pdev = vdev->pdev;
  5138. if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
  5139. (pdev->mo_data_filter & FILTER_DATA_UCAST))
  5140. return true;
  5141. return false;
  5142. }
  5143. /**
  5144. * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
  5145. * @vdev_handle: Datapath VDEV handle
  5146. * Return: true on mcast filter flag set
  5147. */
  5148. static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
  5149. {
  5150. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5151. struct dp_pdev *pdev;
  5152. pdev = vdev->pdev;
  5153. if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
  5154. (pdev->mo_data_filter & FILTER_DATA_MCAST))
  5155. return true;
  5156. return false;
  5157. }
  5158. /**
  5159. * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
  5160. * @vdev_handle: Datapath VDEV handle
  5161. * Return: true on non data filter flag set
  5162. */
  5163. static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
  5164. {
  5165. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5166. struct dp_pdev *pdev;
  5167. pdev = vdev->pdev;
  5168. if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
  5169. (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
  5170. if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
  5171. (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
  5172. return true;
  5173. }
  5174. }
  5175. return false;
  5176. }
  5177. #ifdef MESH_MODE_SUPPORT
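/*
 * dp_peer_set_mesh_mode() - enable or disable mesh mode on a vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */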
  5178. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  5179. {
  5180. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5181. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5182. FL("val %d"), val);
  5183. vdev->mesh_vdev = val;
  5184. }
  5185. /*
  5186. * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
  5187. * @vdev_hdl: virtual device object
  5188. * @val: value to be set
  5189. *
  5190. * Return: void
  5191. */
  5192. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  5193. {
  5194. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5195. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5196. FL("val %d"), val);
  5197. vdev->mesh_rx_filter = val;
  5198. }
  5199. #endif
  5200. /*
5201. * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
5202. * Current scope is bar received count
5203. *
5204. * @pdev: DP_PDEV handle
  5205. *
  5206. * Return: void
  5207. */
  5208. #define STATS_PROC_TIMEOUT (HZ/1000)
  5209. static void
  5210. dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
  5211. {
  5212. struct dp_vdev *vdev;
  5213. struct dp_peer *peer;
  5214. uint32_t waitcnt;
  5215. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5216. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5217. if (!peer) {
  5218. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5219. FL("DP Invalid Peer refernce"));
  5220. return;
  5221. }
  5222. if (peer->delete_in_progress) {
  5223. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5224. FL("DP Peer deletion in progress"));
  5225. continue;
  5226. }
  5227. qdf_atomic_inc(&peer->ref_cnt);
  5228. waitcnt = 0;
  5229. dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
  5230. while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
  5231. && waitcnt < 10) {
  5232. schedule_timeout_interruptible(
  5233. STATS_PROC_TIMEOUT);
  5234. waitcnt++;
  5235. }
  5236. qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
  5237. dp_peer_unref_delete(peer);
  5238. }
  5239. }
  5240. }
  5241. /**
  5242. * dp_rx_bar_stats_cb(): BAR received stats callback
  5243. * @soc: SOC handle
  5244. * @cb_ctxt: Call back context
  5245. * @reo_status: Reo status
  5246. *
  5247. * return: void
  5248. */
  5249. void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  5250. union hal_reo_status *reo_status)
  5251. {
  5252. struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
  5253. struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
  5254. if (!qdf_atomic_read(&soc->cmn_init_done))
  5255. return;
  5256. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
  5257. DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
  5258. queue_status->header.status);
  5259. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  5260. return;
  5261. }
  5262. pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
  5263. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  5264. }
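/*
 * Illustrative sketch (not part of the driver): the handshake between
 * dp_aggregate_pdev_ctrl_frames_stats() above and this callback. The
 * aggregation loop requests the REO queue stats per peer and then polls
 * pdev->stats_cmd_complete, which this callback sets once the REO status
 * (or an error) arrives:
 *
 *	dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);	// request
 *	while (!qdf_atomic_read(&pdev->stats_cmd_complete) &&	// bounded wait
 *	       waitcnt < 10)
 *		schedule_timeout_interruptible(STATS_PROC_TIMEOUT);
 *	qdf_atomic_set(&pdev->stats_cmd_complete, 0);		// rearm
 *
 * The wait is bounded (about 10 * STATS_PROC_TIMEOUT), so a lost status
 * simply leaves bar_recv_cnt unchanged for that peer.
 */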
  5265. /**
  5266. * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5267. * @vdev: DP VDEV handle
5268. * @vdev_stats: buffer to hold the aggregated vdev stats
5269. * Return: void
  5270. */
  5271. void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
  5272. struct cdp_vdev_stats *vdev_stats)
  5273. {
  5274. struct dp_peer *peer = NULL;
  5275. struct dp_soc *soc = NULL;
  5276. if (!vdev || !vdev->pdev)
  5277. return;
  5278. soc = vdev->pdev->soc;
  5279. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  5280. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
  5281. dp_update_vdev_stats(vdev_stats, peer);
  5282. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5283. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5284. vdev_stats, vdev->vdev_id,
  5285. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  5286. #endif
  5287. }
  5288. /**
  5289. * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
  5290. * @pdev: DP PDEV handle
  5291. *
  5292. * return: void
  5293. */
  5294. static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
  5295. {
  5296. struct dp_vdev *vdev = NULL;
  5297. struct dp_soc *soc;
  5298. struct cdp_vdev_stats *vdev_stats =
  5299. qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  5300. if (!vdev_stats) {
  5301. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5302. "DP alloc failure - unable to get alloc vdev stats");
  5303. return;
  5304. }
  5305. qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
  5306. qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
  5307. qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
  5308. if (pdev->mcopy_mode)
  5309. DP_UPDATE_STATS(pdev, pdev->invalid_peer);
  5310. soc = pdev->soc;
  5311. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  5312. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  5313. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5314. dp_aggregate_vdev_stats(vdev, vdev_stats);
  5315. dp_update_pdev_stats(pdev, vdev_stats);
  5316. dp_update_pdev_ingress_stats(pdev, vdev);
  5317. }
  5318. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  5319. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  5320. qdf_mem_free(vdev_stats);
  5321. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5322. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
  5323. pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
  5324. #endif
  5325. }
  5326. /**
  5327. * dp_vdev_getstats() - get vdev packet level stats
  5328. * @vdev_handle: Datapath VDEV handle
  5329. * @stats: cdp network device stats structure
  5330. *
  5331. * Return: void
  5332. */
  5333. static void dp_vdev_getstats(void *vdev_handle,
  5334. struct cdp_dev_stats *stats)
  5335. {
  5336. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5337. struct dp_pdev *pdev;
  5338. struct dp_soc *soc;
  5339. struct cdp_vdev_stats *vdev_stats;
  5340. if (!vdev)
  5341. return;
  5342. pdev = vdev->pdev;
  5343. if (!pdev)
  5344. return;
  5345. soc = pdev->soc;
  5346. vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  5347. if (!vdev_stats) {
  5348. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5349. "DP alloc failure - unable to get alloc vdev stats");
  5350. return;
  5351. }
  5352. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  5353. dp_aggregate_vdev_stats(vdev, vdev_stats);
  5354. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  5355. stats->tx_packets = vdev_stats->tx_i.rcvd.num;
  5356. stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
  5357. stats->tx_errors = vdev_stats->tx.tx_failed +
  5358. vdev_stats->tx_i.dropped.dropped_pkt.num;
  5359. stats->tx_dropped = stats->tx_errors;
  5360. stats->rx_packets = vdev_stats->rx.unicast.num +
  5361. vdev_stats->rx.multicast.num +
  5362. vdev_stats->rx.bcast.num;
  5363. stats->rx_bytes = vdev_stats->rx.unicast.bytes +
  5364. vdev_stats->rx.multicast.bytes +
  5365. vdev_stats->rx.bcast.bytes;
  5366. }
  5367. /**
  5368. * dp_pdev_getstats() - get pdev packet level stats
  5369. * @pdev_handle: Datapath PDEV handle
  5370. * @stats: cdp network device stats structure
  5371. *
  5372. * Return: void
  5373. */
  5374. static void dp_pdev_getstats(void *pdev_handle,
  5375. struct cdp_dev_stats *stats)
  5376. {
  5377. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5378. dp_aggregate_pdev_stats(pdev);
  5379. stats->tx_packets = pdev->stats.tx_i.rcvd.num;
  5380. stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
  5381. stats->tx_errors = pdev->stats.tx.tx_failed +
  5382. pdev->stats.tx_i.dropped.dropped_pkt.num;
  5383. stats->tx_dropped = stats->tx_errors;
  5384. stats->rx_packets = pdev->stats.rx.unicast.num +
  5385. pdev->stats.rx.multicast.num +
  5386. pdev->stats.rx.bcast.num;
  5387. stats->rx_bytes = pdev->stats.rx.unicast.bytes +
  5388. pdev->stats.rx.multicast.bytes +
  5389. pdev->stats.rx.bcast.bytes;
  5390. }
  5391. /**
  5392. * dp_get_device_stats() - get interface level packet stats
  5393. * @handle: device handle
  5394. * @stats: cdp network device stats structure
  5395. * @type: device type pdev/vdev
  5396. *
  5397. * Return: void
  5398. */
  5399. static void dp_get_device_stats(void *handle,
  5400. struct cdp_dev_stats *stats, uint8_t type)
  5401. {
  5402. switch (type) {
  5403. case UPDATE_VDEV_STATS:
  5404. dp_vdev_getstats(handle, stats);
  5405. break;
  5406. case UPDATE_PDEV_STATS:
  5407. dp_pdev_getstats(handle, stats);
  5408. break;
  5409. default:
  5410. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  5411. "apstats cannot be updated for this input "
  5412. "type %d", type);
  5413. break;
  5414. }
  5415. }
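/*
 * Illustrative sketch (not part of the driver): callers select the handle
 * type with the same UPDATE_*_STATS identifiers used in the switch above.
 * The vdev_handle/pdev_handle names here are placeholders.
 *
 *	struct cdp_dev_stats dev_stats;
 *
 *	dp_get_device_stats(vdev_handle, &dev_stats, UPDATE_VDEV_STATS);
 *	dp_get_device_stats(pdev_handle, &dev_stats, UPDATE_PDEV_STATS);
 */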
  5416. /**
  5417. * dp_print_pdev_tx_stats(): Print Pdev level TX stats
  5418. * @pdev: DP_PDEV Handle
  5419. *
  5420. * Return:void
  5421. */
  5422. static inline void
  5423. dp_print_pdev_tx_stats(struct dp_pdev *pdev)
  5424. {
  5425. uint8_t index = 0;
  5426. DP_PRINT_STATS("PDEV Tx Stats:\n");
  5427. DP_PRINT_STATS("Received From Stack:");
  5428. DP_PRINT_STATS(" Packets = %d",
  5429. pdev->stats.tx_i.rcvd.num);
  5430. DP_PRINT_STATS(" Bytes = %llu",
  5431. pdev->stats.tx_i.rcvd.bytes);
  5432. DP_PRINT_STATS("Processed:");
  5433. DP_PRINT_STATS(" Packets = %d",
  5434. pdev->stats.tx_i.processed.num);
  5435. DP_PRINT_STATS(" Bytes = %llu",
  5436. pdev->stats.tx_i.processed.bytes);
  5437. DP_PRINT_STATS("Total Completions:");
  5438. DP_PRINT_STATS(" Packets = %u",
  5439. pdev->stats.tx.comp_pkt.num);
  5440. DP_PRINT_STATS(" Bytes = %llu",
  5441. pdev->stats.tx.comp_pkt.bytes);
  5442. DP_PRINT_STATS("Successful Completions:");
  5443. DP_PRINT_STATS(" Packets = %u",
  5444. pdev->stats.tx.tx_success.num);
  5445. DP_PRINT_STATS(" Bytes = %llu",
  5446. pdev->stats.tx.tx_success.bytes);
  5447. DP_PRINT_STATS("Dropped:");
  5448. DP_PRINT_STATS(" Total = %d",
  5449. pdev->stats.tx_i.dropped.dropped_pkt.num);
  5450. DP_PRINT_STATS(" Dma_map_error = %d",
  5451. pdev->stats.tx_i.dropped.dma_error);
  5452. DP_PRINT_STATS(" Ring Full = %d",
  5453. pdev->stats.tx_i.dropped.ring_full);
  5454. DP_PRINT_STATS(" Descriptor Not available = %d",
  5455. pdev->stats.tx_i.dropped.desc_na.num);
  5456. DP_PRINT_STATS(" HW enqueue failed= %d",
  5457. pdev->stats.tx_i.dropped.enqueue_fail);
  5458. DP_PRINT_STATS(" Resources Full = %d",
  5459. pdev->stats.tx_i.dropped.res_full);
  5460. DP_PRINT_STATS(" FW removed Pkts = %u",
  5461. pdev->stats.tx.dropped.fw_rem.num);
  5462. DP_PRINT_STATS(" FW removed bytes= %llu",
  5463. pdev->stats.tx.dropped.fw_rem.bytes);
  5464. DP_PRINT_STATS(" FW removed transmitted = %d",
  5465. pdev->stats.tx.dropped.fw_rem_tx);
  5466. DP_PRINT_STATS(" FW removed untransmitted = %d",
  5467. pdev->stats.tx.dropped.fw_rem_notx);
  5468. DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d",
  5469. pdev->stats.tx.dropped.fw_reason1);
  5470. DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d",
  5471. pdev->stats.tx.dropped.fw_reason2);
  5472. DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d",
  5473. pdev->stats.tx.dropped.fw_reason3);
  5474. DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
  5475. pdev->stats.tx.dropped.age_out);
  5476. DP_PRINT_STATS(" headroom insufficient = %d",
  5477. pdev->stats.tx_i.dropped.headroom_insufficient);
  5478. DP_PRINT_STATS(" Multicast:");
  5479. DP_PRINT_STATS(" Packets: %u",
  5480. pdev->stats.tx.mcast.num);
  5481. DP_PRINT_STATS(" Bytes: %llu",
  5482. pdev->stats.tx.mcast.bytes);
  5483. DP_PRINT_STATS("Scatter Gather:");
  5484. DP_PRINT_STATS(" Packets = %d",
  5485. pdev->stats.tx_i.sg.sg_pkt.num);
  5486. DP_PRINT_STATS(" Bytes = %llu",
  5487. pdev->stats.tx_i.sg.sg_pkt.bytes);
  5488. DP_PRINT_STATS(" Dropped By Host = %d",
  5489. pdev->stats.tx_i.sg.dropped_host.num);
  5490. DP_PRINT_STATS(" Dropped By Target = %d",
  5491. pdev->stats.tx_i.sg.dropped_target);
  5492. DP_PRINT_STATS("TSO:");
  5493. DP_PRINT_STATS(" Number of Segments = %d",
  5494. pdev->stats.tx_i.tso.num_seg);
  5495. DP_PRINT_STATS(" Packets = %d",
  5496. pdev->stats.tx_i.tso.tso_pkt.num);
  5497. DP_PRINT_STATS(" Bytes = %llu",
  5498. pdev->stats.tx_i.tso.tso_pkt.bytes);
  5499. DP_PRINT_STATS(" Dropped By Host = %d",
  5500. pdev->stats.tx_i.tso.dropped_host.num);
  5501. DP_PRINT_STATS("Mcast Enhancement:");
  5502. DP_PRINT_STATS(" Packets = %d",
  5503. pdev->stats.tx_i.mcast_en.mcast_pkt.num);
  5504. DP_PRINT_STATS(" Bytes = %llu",
  5505. pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
  5506. DP_PRINT_STATS(" Dropped: Map Errors = %d",
  5507. pdev->stats.tx_i.mcast_en.dropped_map_error);
  5508. DP_PRINT_STATS(" Dropped: Self Mac = %d",
  5509. pdev->stats.tx_i.mcast_en.dropped_self_mac);
  5510. DP_PRINT_STATS(" Dropped: Send Fail = %d",
  5511. pdev->stats.tx_i.mcast_en.dropped_send_fail);
  5512. DP_PRINT_STATS(" Unicast sent = %d",
  5513. pdev->stats.tx_i.mcast_en.ucast);
  5514. DP_PRINT_STATS("Raw:");
  5515. DP_PRINT_STATS(" Packets = %d",
  5516. pdev->stats.tx_i.raw.raw_pkt.num);
  5517. DP_PRINT_STATS(" Bytes = %llu",
  5518. pdev->stats.tx_i.raw.raw_pkt.bytes);
  5519. DP_PRINT_STATS(" DMA map error = %d",
  5520. pdev->stats.tx_i.raw.dma_map_error);
  5521. DP_PRINT_STATS("Reinjected:");
  5522. DP_PRINT_STATS(" Packets = %d",
  5523. pdev->stats.tx_i.reinject_pkts.num);
  5524. DP_PRINT_STATS(" Bytes = %llu\n",
  5525. pdev->stats.tx_i.reinject_pkts.bytes);
  5526. DP_PRINT_STATS("Inspected:");
  5527. DP_PRINT_STATS(" Packets = %d",
  5528. pdev->stats.tx_i.inspect_pkts.num);
  5529. DP_PRINT_STATS(" Bytes = %llu",
  5530. pdev->stats.tx_i.inspect_pkts.bytes);
  5531. DP_PRINT_STATS("Nawds Multicast:");
  5532. DP_PRINT_STATS(" Packets = %d",
  5533. pdev->stats.tx_i.nawds_mcast.num);
  5534. DP_PRINT_STATS(" Bytes = %llu",
  5535. pdev->stats.tx_i.nawds_mcast.bytes);
  5536. DP_PRINT_STATS("CCE Classified:");
  5537. DP_PRINT_STATS(" CCE Classified Packets: %u",
  5538. pdev->stats.tx_i.cce_classified);
  5539. DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
  5540. pdev->stats.tx_i.cce_classified_raw);
  5541. DP_PRINT_STATS("Mesh stats:");
  5542. DP_PRINT_STATS(" frames to firmware: %u",
  5543. pdev->stats.tx_i.mesh.exception_fw);
  5544. DP_PRINT_STATS(" completions from fw: %u",
  5545. pdev->stats.tx_i.mesh.completion_fw);
  5546. DP_PRINT_STATS("PPDU stats counter");
  5547. for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
  5548. DP_PRINT_STATS(" Tag[%d] = %llu", index,
  5549. pdev->stats.ppdu_stats_counter[index]);
  5550. }
  5551. }
  5552. /**
  5553. * dp_print_pdev_rx_stats(): Print Pdev level RX stats
  5554. * @pdev: DP_PDEV Handle
  5555. *
  5556. * Return: void
  5557. */
  5558. static inline void
  5559. dp_print_pdev_rx_stats(struct dp_pdev *pdev)
  5560. {
  5561. DP_PRINT_STATS("PDEV Rx Stats:\n");
  5562. DP_PRINT_STATS("Received From HW (Per Rx Ring):");
  5563. DP_PRINT_STATS(" Packets = %d %d %d %d",
  5564. pdev->stats.rx.rcvd_reo[0].num,
  5565. pdev->stats.rx.rcvd_reo[1].num,
  5566. pdev->stats.rx.rcvd_reo[2].num,
  5567. pdev->stats.rx.rcvd_reo[3].num);
  5568. DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
  5569. pdev->stats.rx.rcvd_reo[0].bytes,
  5570. pdev->stats.rx.rcvd_reo[1].bytes,
  5571. pdev->stats.rx.rcvd_reo[2].bytes,
  5572. pdev->stats.rx.rcvd_reo[3].bytes);
  5573. DP_PRINT_STATS("Replenished:");
  5574. DP_PRINT_STATS(" Packets = %d",
  5575. pdev->stats.replenish.pkts.num);
  5576. DP_PRINT_STATS(" Bytes = %llu",
  5577. pdev->stats.replenish.pkts.bytes);
  5578. DP_PRINT_STATS(" Buffers Added To Freelist = %d",
  5579. pdev->stats.buf_freelist);
  5580. DP_PRINT_STATS(" Low threshold intr = %d",
  5581. pdev->stats.replenish.low_thresh_intrs);
  5582. DP_PRINT_STATS("Dropped:");
  5583. DP_PRINT_STATS(" msdu_not_done = %d",
  5584. pdev->stats.dropped.msdu_not_done);
  5585. DP_PRINT_STATS(" mon_rx_drop = %d",
  5586. pdev->stats.dropped.mon_rx_drop);
  5587. DP_PRINT_STATS(" mec_drop = %d",
  5588. pdev->stats.rx.mec_drop.num);
  5589. DP_PRINT_STATS(" Bytes = %llu",
  5590. pdev->stats.rx.mec_drop.bytes);
  5591. DP_PRINT_STATS("Sent To Stack:");
  5592. DP_PRINT_STATS(" Packets = %d",
  5593. pdev->stats.rx.to_stack.num);
  5594. DP_PRINT_STATS(" Bytes = %llu",
  5595. pdev->stats.rx.to_stack.bytes);
  5596. DP_PRINT_STATS("Multicast/Broadcast:");
  5597. DP_PRINT_STATS(" Packets = %d",
  5598. pdev->stats.rx.multicast.num);
  5599. DP_PRINT_STATS(" Bytes = %llu",
  5600. pdev->stats.rx.multicast.bytes);
  5601. DP_PRINT_STATS("Errors:");
  5602. DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d",
  5603. pdev->stats.replenish.rxdma_err);
  5604. DP_PRINT_STATS(" Desc Alloc Failed: = %d",
  5605. pdev->stats.err.desc_alloc_fail);
  5606. DP_PRINT_STATS(" IP checksum error = %d",
  5607. pdev->stats.err.ip_csum_err);
  5608. DP_PRINT_STATS(" TCP/UDP checksum error = %d",
  5609. pdev->stats.err.tcp_udp_csum_err);
  5610. /* Get bar_recv_cnt */
  5611. dp_aggregate_pdev_ctrl_frames_stats(pdev);
  5612. DP_PRINT_STATS("BAR Received Count: = %d",
  5613. pdev->stats.rx.bar_recv_cnt);
  5614. }
  5615. /**
  5616. * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
  5617. * @pdev: DP_PDEV Handle
  5618. *
  5619. * Return: void
  5620. */
  5621. static inline void
  5622. dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
  5623. {
  5624. struct cdp_pdev_mon_stats *rx_mon_stats;
  5625. rx_mon_stats = &pdev->rx_mon_stats;
  5626. DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
  5627. dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
  5628. DP_PRINT_STATS("status_ppdu_done_cnt = %d",
  5629. rx_mon_stats->status_ppdu_done);
  5630. DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
  5631. rx_mon_stats->dest_ppdu_done);
  5632. DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
  5633. rx_mon_stats->dest_mpdu_done);
  5634. DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
  5635. rx_mon_stats->dest_mpdu_drop);
  5636. DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
  5637. rx_mon_stats->dup_mon_linkdesc_cnt);
  5638. DP_PRINT_STATS("dup_mon_buf_cnt = %d",
  5639. rx_mon_stats->dup_mon_buf_cnt);
  5640. }
  5641. /**
5642. * dp_print_soc_tx_stats(): Print SOC level Tx stats
5643. * @soc: DP_SOC Handle
  5644. *
  5645. * Return: void
  5646. */
  5647. static inline void
  5648. dp_print_soc_tx_stats(struct dp_soc *soc)
  5649. {
  5650. uint8_t desc_pool_id;
  5651. soc->stats.tx.desc_in_use = 0;
  5652. DP_PRINT_STATS("SOC Tx Stats:\n");
  5653. for (desc_pool_id = 0;
  5654. desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5655. desc_pool_id++)
  5656. soc->stats.tx.desc_in_use +=
  5657. soc->tx_desc[desc_pool_id].num_allocated;
  5658. DP_PRINT_STATS("Tx Descriptors In Use = %d",
  5659. soc->stats.tx.desc_in_use);
  5660. DP_PRINT_STATS("Tx Invalid peer:");
  5661. DP_PRINT_STATS(" Packets = %d",
  5662. soc->stats.tx.tx_invalid_peer.num);
  5663. DP_PRINT_STATS(" Bytes = %llu",
  5664. soc->stats.tx.tx_invalid_peer.bytes);
  5665. DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
  5666. soc->stats.tx.tcl_ring_full[0],
  5667. soc->stats.tx.tcl_ring_full[1],
  5668. soc->stats.tx.tcl_ring_full[2]);
  5669. }
  5670. /**
5671. * dp_print_soc_rx_stats(): Print SOC level Rx stats
  5672. * @soc: DP_SOC Handle
  5673. *
  5674. * Return:void
  5675. */
  5676. static inline void
  5677. dp_print_soc_rx_stats(struct dp_soc *soc)
  5678. {
  5679. uint32_t i;
  5680. char reo_error[DP_REO_ERR_LENGTH];
  5681. char rxdma_error[DP_RXDMA_ERR_LENGTH];
  5682. uint8_t index = 0;
  5683. DP_PRINT_STATS("No of AST Entries = %d", soc->num_ast_entries);
  5684. DP_PRINT_STATS("SOC Rx Stats:\n");
  5685. DP_PRINT_STATS("Fragmented packets: %u",
  5686. soc->stats.rx.rx_frags);
  5687. DP_PRINT_STATS("Reo reinjected packets: %u",
  5688. soc->stats.rx.reo_reinject);
  5689. DP_PRINT_STATS("Errors:\n");
  5690. DP_PRINT_STATS("Rx Decrypt Errors = %d",
  5691. (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
  5692. soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
  5693. DP_PRINT_STATS("Invalid RBM = %d",
  5694. soc->stats.rx.err.invalid_rbm);
  5695. DP_PRINT_STATS("Invalid Vdev = %d",
  5696. soc->stats.rx.err.invalid_vdev);
  5697. DP_PRINT_STATS("Invalid Pdev = %d",
  5698. soc->stats.rx.err.invalid_pdev);
  5699. DP_PRINT_STATS("Invalid Peer = %d",
  5700. soc->stats.rx.err.rx_invalid_peer.num);
  5701. DP_PRINT_STATS("HAL Ring Access Fail = %d",
  5702. soc->stats.rx.err.hal_ring_access_fail);
  5703. DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
  5704. DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
  5705. DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
  5706. DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
  5707. DP_PRINT_STATS("RX DUP DESC: %d",
  5708. soc->stats.rx.err.hal_reo_dest_dup);
  5709. DP_PRINT_STATS("RX REL DUP DESC: %d",
  5710. soc->stats.rx.err.hal_wbm_rel_dup);
  5711. for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
  5712. index += qdf_snprint(&rxdma_error[index],
  5713. DP_RXDMA_ERR_LENGTH - index,
  5714. " %d", soc->stats.rx.err.rxdma_error[i]);
  5715. }
  5716. DP_PRINT_STATS("RXDMA Error (0-31):%s",
  5717. rxdma_error);
  5718. index = 0;
  5719. for (i = 0; i < HAL_REO_ERR_MAX; i++) {
  5720. index += qdf_snprint(&reo_error[index],
  5721. DP_REO_ERR_LENGTH - index,
  5722. " %d", soc->stats.rx.err.reo_error[i]);
  5723. }
  5724. DP_PRINT_STATS("REO Error(0-14):%s",
  5725. reo_error);
  5726. }
  5727. /**
5728. * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
5729. * @ring_type: Ring type
  5730. *
  5731. * Return: char const pointer
  5732. */
  5733. static inline const
  5734. char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
  5735. {
  5736. switch (ring_type) {
  5737. case REO_DST:
  5738. return "Reo_dst";
  5739. case REO_EXCEPTION:
  5740. return "Reo_exception";
  5741. case REO_CMD:
  5742. return "Reo_cmd";
  5743. case REO_REINJECT:
  5744. return "Reo_reinject";
  5745. case REO_STATUS:
  5746. return "Reo_status";
  5747. case WBM2SW_RELEASE:
  5748. return "wbm2sw_release";
  5749. case TCL_DATA:
  5750. return "tcl_data";
  5751. case TCL_CMD:
  5752. return "tcl_cmd";
  5753. case TCL_STATUS:
  5754. return "tcl_status";
  5755. case SW2WBM_RELEASE:
  5756. return "sw2wbm_release";
  5757. case RXDMA_BUF:
  5758. return "Rxdma_buf";
  5759. case RXDMA_DST:
  5760. return "Rxdma_dst";
  5761. case RXDMA_MONITOR_BUF:
  5762. return "Rxdma_monitor_buf";
  5763. case RXDMA_MONITOR_DESC:
  5764. return "Rxdma_monitor_desc";
  5765. case RXDMA_MONITOR_STATUS:
  5766. return "Rxdma_monitor_status";
  5767. default:
  5768. dp_err("Invalid ring type");
  5769. break;
  5770. }
  5771. return "Invalid";
  5772. }
  5773. /**
  5774. * dp_print_ring_stat_from_hal(): Print hal level ring stats
  5775. * @soc: DP_SOC handle
  5776. * @srng: DP_SRNG handle
5777. * @ring_type: srng src/dst ring type
  5779. *
  5780. * Return: void
  5781. */
  5782. static void
  5783. dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
  5784. enum hal_ring_type ring_type)
  5785. {
  5786. uint32_t tailp;
  5787. uint32_t headp;
  5788. int32_t hw_headp = -1;
  5789. int32_t hw_tailp = -1;
  5790. const char *ring_name;
  5791. struct hal_soc *hal_soc;
  5792. if (soc && srng && srng->hal_srng) {
  5793. hal_soc = (struct hal_soc *)soc->hal_soc;
  5794. ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
  5795. hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
  5796. DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
  5797. ring_name, headp, tailp);
  5798. hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
  5799. &hw_tailp, ring_type);
  5800. DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
  5801. ring_name, hw_headp, hw_tailp);
  5802. }
  5803. }
  5804. /**
5805. * dp_print_mon_ring_stat_from_hal() - Print stats for monitor rings based
5806. * on target
  5807. * @pdev: physical device handle
  5808. * @mac_id: mac id
  5809. *
  5810. * Return: void
  5811. */
  5812. static inline
  5813. void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
  5814. {
  5815. if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
  5816. dp_print_ring_stat_from_hal(pdev->soc,
  5817. &pdev->rxdma_mon_buf_ring[mac_id],
  5818. RXDMA_MONITOR_BUF);
  5819. dp_print_ring_stat_from_hal(pdev->soc,
  5820. &pdev->rxdma_mon_dst_ring[mac_id],
  5821. RXDMA_MONITOR_DST);
  5822. dp_print_ring_stat_from_hal(pdev->soc,
  5823. &pdev->rxdma_mon_desc_ring[mac_id],
  5824. RXDMA_MONITOR_DESC);
  5825. }
  5826. dp_print_ring_stat_from_hal(pdev->soc,
  5827. &pdev->rxdma_mon_status_ring[mac_id],
  5828. RXDMA_MONITOR_STATUS);
  5829. }
  5830. /**
  5831. * dp_print_ring_stats(): Print tail and head pointer
  5832. * @pdev: DP_PDEV handle
  5833. *
  5834. * Return:void
  5835. */
  5836. static inline void
  5837. dp_print_ring_stats(struct dp_pdev *pdev)
  5838. {
  5839. uint32_t i;
  5840. int mac_id;
  5841. dp_print_ring_stat_from_hal(pdev->soc,
  5842. &pdev->soc->reo_exception_ring,
  5843. REO_EXCEPTION);
  5844. dp_print_ring_stat_from_hal(pdev->soc,
  5845. &pdev->soc->reo_reinject_ring,
  5846. REO_REINJECT);
  5847. dp_print_ring_stat_from_hal(pdev->soc,
  5848. &pdev->soc->reo_cmd_ring,
  5849. REO_CMD);
  5850. dp_print_ring_stat_from_hal(pdev->soc,
  5851. &pdev->soc->reo_status_ring,
  5852. REO_STATUS);
  5853. dp_print_ring_stat_from_hal(pdev->soc,
  5854. &pdev->soc->rx_rel_ring,
  5855. WBM2SW_RELEASE);
  5856. dp_print_ring_stat_from_hal(pdev->soc,
  5857. &pdev->soc->tcl_cmd_ring,
  5858. TCL_CMD);
  5859. dp_print_ring_stat_from_hal(pdev->soc,
  5860. &pdev->soc->tcl_status_ring,
  5861. TCL_STATUS);
  5862. dp_print_ring_stat_from_hal(pdev->soc,
  5863. &pdev->soc->wbm_desc_rel_ring,
  5864. SW2WBM_RELEASE);
  5865. for (i = 0; i < MAX_REO_DEST_RINGS; i++)
  5866. dp_print_ring_stat_from_hal(pdev->soc,
  5867. &pdev->soc->reo_dest_ring[i],
  5868. REO_DST);
  5869. for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
  5870. dp_print_ring_stat_from_hal(pdev->soc,
  5871. &pdev->soc->tcl_data_ring[i],
  5872. TCL_DATA);
  5873. for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
  5874. dp_print_ring_stat_from_hal(pdev->soc,
  5875. &pdev->soc->tx_comp_ring[i],
  5876. WBM2SW_RELEASE);
  5877. dp_print_ring_stat_from_hal(pdev->soc,
  5878. &pdev->rx_refill_buf_ring,
  5879. RXDMA_BUF);
  5880. dp_print_ring_stat_from_hal(pdev->soc,
  5881. &pdev->rx_refill_buf_ring2,
  5882. RXDMA_BUF);
  5883. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  5884. dp_print_ring_stat_from_hal(pdev->soc,
  5885. &pdev->rx_mac_buf_ring[i],
  5886. RXDMA_BUF);
  5887. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
  5888. dp_print_mon_ring_stat_from_hal(pdev, mac_id);
  5889. for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
  5890. dp_print_ring_stat_from_hal(pdev->soc,
  5891. &pdev->rxdma_err_dst_ring[i],
  5892. RXDMA_DST);
  5893. }
  5894. /**
  5895. * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
  5896. * @vdev: DP_VDEV handle
  5897. *
  5898. * Return:void
  5899. */
  5900. static inline void
  5901. dp_txrx_host_stats_clr(struct dp_vdev *vdev)
  5902. {
  5903. struct dp_peer *peer = NULL;
  5904. if (!vdev || !vdev->pdev)
  5905. return;
  5906. DP_STATS_CLR(vdev->pdev);
  5907. DP_STATS_CLR(vdev->pdev->soc);
  5908. DP_STATS_CLR(vdev);
  5909. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5910. if (!peer)
  5911. return;
  5912. DP_STATS_CLR(peer);
  5913. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5914. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5915. &peer->stats, peer->peer_ids[0],
  5916. UPDATE_PEER_STATS, vdev->pdev->pdev_id);
  5917. #endif
  5918. }
  5919. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5920. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5921. &vdev->stats, vdev->vdev_id,
  5922. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  5923. #endif
  5924. }
  5925. /**
5926. * dp_print_common_rates_info(): Print common rate info for tx or rx
5927. * @pkt_type_array: rate type array containing rate info
  5928. *
  5929. * Return:void
  5930. */
  5931. static inline void
  5932. dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
  5933. {
  5934. uint8_t mcs, pkt_type;
  5935. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  5936. for (mcs = 0; mcs < MAX_MCS; mcs++) {
  5937. if (!dp_rate_string[pkt_type][mcs].valid)
  5938. continue;
  5939. DP_PRINT_STATS(" %s = %d",
  5940. dp_rate_string[pkt_type][mcs].mcs_type,
  5941. pkt_type_array[pkt_type].mcs_count[mcs]);
  5942. }
  5943. DP_PRINT_STATS("\n");
  5944. }
  5945. }
  5946. /**
  5947. * dp_print_rx_rates(): Print Rx rate stats
  5948. * @vdev: DP_VDEV handle
  5949. *
  5950. * Return:void
  5951. */
  5952. static inline void
  5953. dp_print_rx_rates(struct dp_vdev *vdev)
  5954. {
  5955. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  5956. uint8_t i;
  5957. uint8_t index = 0;
  5958. char nss[DP_NSS_LENGTH];
  5959. DP_PRINT_STATS("Rx Rate Info:\n");
  5960. dp_print_common_rates_info(pdev->stats.rx.pkt_type);
  5961. index = 0;
  5962. for (i = 0; i < SS_COUNT; i++) {
  5963. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  5964. " %d", pdev->stats.rx.nss[i]);
  5965. }
  5966. DP_PRINT_STATS("NSS(1-8) = %s",
  5967. nss);
  5968. DP_PRINT_STATS("SGI ="
  5969. " 0.8us %d,"
  5970. " 0.4us %d,"
  5971. " 1.6us %d,"
  5972. " 3.2us %d,",
  5973. pdev->stats.rx.sgi_count[0],
  5974. pdev->stats.rx.sgi_count[1],
  5975. pdev->stats.rx.sgi_count[2],
  5976. pdev->stats.rx.sgi_count[3]);
  5977. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  5978. pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
  5979. pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
  5980. DP_PRINT_STATS("Reception Type ="
  5981. " SU: %d,"
  5982. " MU_MIMO:%d,"
  5983. " MU_OFDMA:%d,"
  5984. " MU_OFDMA_MIMO:%d\n",
  5985. pdev->stats.rx.reception_type[0],
  5986. pdev->stats.rx.reception_type[1],
  5987. pdev->stats.rx.reception_type[2],
  5988. pdev->stats.rx.reception_type[3]);
  5989. DP_PRINT_STATS("Aggregation:\n");
  5990. DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
  5991. pdev->stats.rx.ampdu_cnt);
  5992. DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
  5993. pdev->stats.rx.non_ampdu_cnt);
  5994. DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
  5995. pdev->stats.rx.amsdu_cnt);
  5996. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
  5997. pdev->stats.rx.non_amsdu_cnt);
  5998. }
  5999. /**
  6000. * dp_print_tx_rates(): Print tx rates
  6001. * @vdev: DP_VDEV handle
  6002. *
  6003. * Return:void
  6004. */
  6005. static inline void
  6006. dp_print_tx_rates(struct dp_vdev *vdev)
  6007. {
  6008. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  6009. uint8_t index;
  6010. char nss[DP_NSS_LENGTH];
  6011. int nss_index;
  6012. DP_PRINT_STATS("Tx Rate Info:\n");
  6013. dp_print_common_rates_info(pdev->stats.tx.pkt_type);
  6014. DP_PRINT_STATS("SGI ="
  6015. " 0.8us %d"
  6016. " 0.4us %d"
  6017. " 1.6us %d"
  6018. " 3.2us %d",
  6019. pdev->stats.tx.sgi_count[0],
  6020. pdev->stats.tx.sgi_count[1],
  6021. pdev->stats.tx.sgi_count[2],
  6022. pdev->stats.tx.sgi_count[3]);
  6023. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  6024. pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
  6025. pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
  6026. index = 0;
  6027. for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
  6028. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6029. " %d", pdev->stats.tx.nss[nss_index]);
  6030. }
  6031. DP_PRINT_STATS("NSS(1-8) = %s", nss);
  6032. DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
  6033. DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
  6034. DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
  6035. DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
  6036. DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
  6037. DP_PRINT_STATS("Aggregation:\n");
  6038. DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
  6039. pdev->stats.tx.amsdu_cnt);
  6040. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
  6041. pdev->stats.tx.non_amsdu_cnt);
  6042. }
  6043. /**
6044. * dp_print_peer_stats(): print peer stats
6045. * @peer: DP_PEER handle
6046. *
6047. * Return: void
  6048. */
  6049. static inline void dp_print_peer_stats(struct dp_peer *peer)
  6050. {
  6051. uint8_t i;
  6052. uint32_t index;
  6053. uint32_t j;
  6054. char nss[DP_NSS_LENGTH];
  6055. char mu_group_id[DP_MU_GROUP_LENGTH];
  6056. DP_PRINT_STATS("Node Tx Stats:\n");
  6057. DP_PRINT_STATS("Total Packet Completions = %d",
  6058. peer->stats.tx.comp_pkt.num);
  6059. DP_PRINT_STATS("Total Bytes Completions = %llu",
  6060. peer->stats.tx.comp_pkt.bytes);
  6061. DP_PRINT_STATS("Success Packets = %d",
  6062. peer->stats.tx.tx_success.num);
  6063. DP_PRINT_STATS("Success Bytes = %llu",
  6064. peer->stats.tx.tx_success.bytes);
  6065. DP_PRINT_STATS("Unicast Success Packets = %d",
  6066. peer->stats.tx.ucast.num);
  6067. DP_PRINT_STATS("Unicast Success Bytes = %llu",
  6068. peer->stats.tx.ucast.bytes);
  6069. DP_PRINT_STATS("Multicast Success Packets = %d",
  6070. peer->stats.tx.mcast.num);
  6071. DP_PRINT_STATS("Multicast Success Bytes = %llu",
  6072. peer->stats.tx.mcast.bytes);
  6073. DP_PRINT_STATS("Broadcast Success Packets = %d",
  6074. peer->stats.tx.bcast.num);
  6075. DP_PRINT_STATS("Broadcast Success Bytes = %llu",
  6076. peer->stats.tx.bcast.bytes);
  6077. DP_PRINT_STATS("Packets Failed = %d",
  6078. peer->stats.tx.tx_failed);
  6079. DP_PRINT_STATS("Packets In OFDMA = %d",
  6080. peer->stats.tx.ofdma);
  6081. DP_PRINT_STATS("Packets In STBC = %d",
  6082. peer->stats.tx.stbc);
  6083. DP_PRINT_STATS("Packets In LDPC = %d",
  6084. peer->stats.tx.ldpc);
  6085. DP_PRINT_STATS("Packet Retries = %d",
  6086. peer->stats.tx.retries);
  6087. DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
  6088. peer->stats.tx.amsdu_cnt);
  6089. DP_PRINT_STATS("Last Packet RSSI = %d",
  6090. peer->stats.tx.last_ack_rssi);
  6091. DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
  6092. peer->stats.tx.dropped.fw_rem.num);
  6093. DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
  6094. peer->stats.tx.dropped.fw_rem.bytes);
  6095. DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
  6096. peer->stats.tx.dropped.fw_rem_tx);
  6097. DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
  6098. peer->stats.tx.dropped.fw_rem_notx);
  6099. DP_PRINT_STATS("Dropped : Age Out = %d",
  6100. peer->stats.tx.dropped.age_out);
  6101. DP_PRINT_STATS("NAWDS : ");
  6102. DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
  6103. peer->stats.tx.nawds_mcast_drop);
  6104. DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
  6105. peer->stats.tx.nawds_mcast.num);
  6106. DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
  6107. peer->stats.tx.nawds_mcast.bytes);
  6108. DP_PRINT_STATS("Rate Info:");
  6109. dp_print_common_rates_info(peer->stats.tx.pkt_type);
  6110. DP_PRINT_STATS("SGI = "
  6111. " 0.8us %d"
  6112. " 0.4us %d"
  6113. " 1.6us %d"
  6114. " 3.2us %d",
  6115. peer->stats.tx.sgi_count[0],
  6116. peer->stats.tx.sgi_count[1],
  6117. peer->stats.tx.sgi_count[2],
  6118. peer->stats.tx.sgi_count[3]);
  6119. DP_PRINT_STATS("Excess Retries per AC ");
  6120. DP_PRINT_STATS(" Best effort = %d",
  6121. peer->stats.tx.excess_retries_per_ac[0]);
  6122. DP_PRINT_STATS(" Background= %d",
  6123. peer->stats.tx.excess_retries_per_ac[1]);
  6124. DP_PRINT_STATS(" Video = %d",
  6125. peer->stats.tx.excess_retries_per_ac[2]);
  6126. DP_PRINT_STATS(" Voice = %d",
  6127. peer->stats.tx.excess_retries_per_ac[3]);
  6128. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
  6129. peer->stats.tx.bw[0], peer->stats.tx.bw[1],
  6130. peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
  6131. index = 0;
  6132. for (i = 0; i < SS_COUNT; i++) {
  6133. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6134. " %d", peer->stats.tx.nss[i]);
  6135. }
  6136. DP_PRINT_STATS("NSS(1-8) = %s", nss);
  6137. DP_PRINT_STATS("Transmit Type :");
  6138. DP_PRINT_STATS("SU %d, MU_MIMO %d, MU_OFDMA %d, MU_MIMO_OFDMA %d",
  6139. peer->stats.tx.transmit_type[0],
  6140. peer->stats.tx.transmit_type[1],
  6141. peer->stats.tx.transmit_type[2],
  6142. peer->stats.tx.transmit_type[3]);
  6143. for (i = 0; i < MAX_MU_GROUP_ID;) {
  6144. index = 0;
  6145. for (j = 0; j < DP_MU_GROUP_SHOW && i < MAX_MU_GROUP_ID;
  6146. j++) {
  6147. index += qdf_snprint(&mu_group_id[index],
  6148. DP_MU_GROUP_LENGTH - index,
  6149. " %d",
  6150. peer->stats.tx.mu_group_id[i]);
  6151. i++;
  6152. }
  6153. DP_PRINT_STATS("User position list for GID %02d->%d: [%s]",
  6154. i - DP_MU_GROUP_SHOW, i - 1, mu_group_id);
  6155. }
  6156. DP_PRINT_STATS("Last Packet RU index [%d], Size [%d]",
  6157. peer->stats.tx.ru_start, peer->stats.tx.ru_tones);
  6158. DP_PRINT_STATS("RU Locations RU[26 52 106 242 484 996]:");
  6159. DP_PRINT_STATS("RU_26: %d", peer->stats.tx.ru_loc[0]);
  6160. DP_PRINT_STATS("RU 52: %d", peer->stats.tx.ru_loc[1]);
  6161. DP_PRINT_STATS("RU 106: %d", peer->stats.tx.ru_loc[2]);
  6162. DP_PRINT_STATS("RU 242: %d", peer->stats.tx.ru_loc[3]);
  6163. DP_PRINT_STATS("RU 484: %d", peer->stats.tx.ru_loc[4]);
  6164. DP_PRINT_STATS("RU 996: %d", peer->stats.tx.ru_loc[5]);
  6165. DP_PRINT_STATS("Aggregation:");
  6166. DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
  6167. peer->stats.tx.amsdu_cnt);
  6168. DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
  6169. peer->stats.tx.non_amsdu_cnt);
  6170. DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
  6171. DP_PRINT_STATS(" Bytes transmitted in last sec: %d",
  6172. peer->stats.tx.tx_byte_rate);
  6173. DP_PRINT_STATS(" Data transmitted in last sec: %d",
  6174. peer->stats.tx.tx_data_rate);
  6175. DP_PRINT_STATS("Node Rx Stats:");
  6176. DP_PRINT_STATS("Packets Sent To Stack = %d",
  6177. peer->stats.rx.to_stack.num);
  6178. DP_PRINT_STATS("Bytes Sent To Stack = %llu",
  6179. peer->stats.rx.to_stack.bytes);
  6180. for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
  6181. DP_PRINT_STATS("Ring Id = %d", i);
  6182. DP_PRINT_STATS(" Packets Received = %d",
  6183. peer->stats.rx.rcvd_reo[i].num);
  6184. DP_PRINT_STATS(" Bytes Received = %llu",
  6185. peer->stats.rx.rcvd_reo[i].bytes);
  6186. }
  6187. DP_PRINT_STATS("Multicast Packets Received = %d",
  6188. peer->stats.rx.multicast.num);
  6189. DP_PRINT_STATS("Multicast Bytes Received = %llu",
  6190. peer->stats.rx.multicast.bytes);
  6191. DP_PRINT_STATS("Broadcast Packets Received = %d",
  6192. peer->stats.rx.bcast.num);
  6193. DP_PRINT_STATS("Broadcast Bytes Received = %llu",
  6194. peer->stats.rx.bcast.bytes);
  6195. DP_PRINT_STATS("Intra BSS Packets Received = %d",
  6196. peer->stats.rx.intra_bss.pkts.num);
  6197. DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
  6198. peer->stats.rx.intra_bss.pkts.bytes);
  6199. DP_PRINT_STATS("Raw Packets Received = %d",
  6200. peer->stats.rx.raw.num);
  6201. DP_PRINT_STATS("Raw Bytes Received = %llu",
  6202. peer->stats.rx.raw.bytes);
  6203. DP_PRINT_STATS("Errors: MIC Errors = %d",
  6204. peer->stats.rx.err.mic_err);
  6205. DP_PRINT_STATS("Erros: Decryption Errors = %d",
  6206. peer->stats.rx.err.decrypt_err);
  6207. DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
  6208. peer->stats.rx.non_ampdu_cnt);
  6209. DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
  6210. peer->stats.rx.ampdu_cnt);
  6211. DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
  6212. peer->stats.rx.non_amsdu_cnt);
  6213. DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
  6214. peer->stats.rx.amsdu_cnt);
  6215. DP_PRINT_STATS("NAWDS : ");
  6216. DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
  6217. peer->stats.rx.nawds_mcast_drop);
  6218. DP_PRINT_STATS("SGI ="
  6219. " 0.8us %d"
  6220. " 0.4us %d"
  6221. " 1.6us %d"
  6222. " 3.2us %d",
  6223. peer->stats.rx.sgi_count[0],
  6224. peer->stats.rx.sgi_count[1],
  6225. peer->stats.rx.sgi_count[2],
  6226. peer->stats.rx.sgi_count[3]);
  6227. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
  6228. peer->stats.rx.bw[0], peer->stats.rx.bw[1],
  6229. peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
  6230. DP_PRINT_STATS("Reception Type ="
  6231. " SU %d,"
  6232. " MU_MIMO %d,"
  6233. " MU_OFDMA %d,"
  6234. " MU_OFDMA_MIMO %d",
  6235. peer->stats.rx.reception_type[0],
  6236. peer->stats.rx.reception_type[1],
  6237. peer->stats.rx.reception_type[2],
  6238. peer->stats.rx.reception_type[3]);
  6239. dp_print_common_rates_info(peer->stats.rx.pkt_type);
  6240. index = 0;
  6241. for (i = 0; i < SS_COUNT; i++) {
  6242. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6243. " %d", peer->stats.rx.nss[i]);
  6244. }
  6245. DP_PRINT_STATS("NSS(1-8) = %s",
  6246. nss);
  6247. DP_PRINT_STATS("Aggregation:");
  6248. DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
  6249. peer->stats.rx.ampdu_cnt);
  6250. DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
  6251. peer->stats.rx.non_ampdu_cnt);
  6252. DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
  6253. peer->stats.rx.amsdu_cnt);
  6254. DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
  6255. peer->stats.rx.non_amsdu_cnt);
  6256. DP_PRINT_STATS("Bytes and Packets received in last one sec:");
  6257. DP_PRINT_STATS(" Bytes received in last sec: %d",
  6258. peer->stats.rx.rx_byte_rate);
  6259. DP_PRINT_STATS(" Data received in last sec: %d",
  6260. peer->stats.rx.rx_data_rate);
  6261. }
  6262. /*
6263. * dp_get_host_peer_stats() - function to print peer stats
  6264. * @pdev_handle: DP_PDEV handle
  6265. * @mac_addr: mac address of the peer
  6266. *
  6267. * Return: void
  6268. */
  6269. static void
  6270. dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
  6271. {
  6272. struct dp_peer *peer;
  6273. uint8_t local_id;
  6274. if (!mac_addr) {
  6275. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6276. "Invalid MAC address\n");
  6277. return;
  6278. }
  6279. peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
  6280. &local_id);
  6281. if (!peer) {
  6282. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6283. "%s: Invalid peer\n", __func__);
  6284. return;
  6285. }
  6286. /* Making sure the peer is for the specific pdev */
  6287. if ((struct dp_pdev *)pdev_handle != peer->vdev->pdev) {
  6288. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6289. "%s: Peer is not for this pdev\n", __func__);
  6290. return;
  6291. }
  6292. dp_print_peer_stats(peer);
  6293. dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
  6294. }
  6295. /**
6296. * dp_print_soc_cfg_params() - Dump soc wlan config parameters
6297. * @soc: Soc handle
  6298. *
  6299. * Return: void
  6300. */
  6301. static void
  6302. dp_print_soc_cfg_params(struct dp_soc *soc)
  6303. {
  6304. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  6305. uint8_t index = 0, i = 0;
  6306. char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
  6307. int num_of_int_contexts;
  6308. if (!soc) {
  6309. dp_err("Context is null");
  6310. return;
  6311. }
  6312. soc_cfg_ctx = soc->wlan_cfg_ctx;
  6313. if (!soc_cfg_ctx) {
  6314. dp_err("Context is null");
  6315. return;
  6316. }
  6317. num_of_int_contexts =
  6318. wlan_cfg_get_num_contexts(soc_cfg_ctx);
  6319. DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
  6320. soc_cfg_ctx->num_int_ctxts);
  6321. DP_TRACE_STATS(DEBUG, "Max clients: %u",
  6322. soc_cfg_ctx->max_clients);
  6323. DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
  6324. soc_cfg_ctx->max_alloc_size);
  6325. DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
  6326. soc_cfg_ctx->per_pdev_tx_ring);
  6327. DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
  6328. soc_cfg_ctx->num_tcl_data_rings);
  6329. DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
  6330. soc_cfg_ctx->per_pdev_rx_ring);
  6331. DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
  6332. soc_cfg_ctx->per_pdev_lmac_ring);
  6333. DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
  6334. soc_cfg_ctx->num_reo_dest_rings);
  6335. DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
  6336. soc_cfg_ctx->num_tx_desc_pool);
  6337. DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
  6338. soc_cfg_ctx->num_tx_ext_desc_pool);
  6339. DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
  6340. soc_cfg_ctx->num_tx_desc);
  6341. DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
  6342. soc_cfg_ctx->num_tx_ext_desc);
  6343. DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
  6344. soc_cfg_ctx->htt_packet_type);
  6345. DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
  6346. soc_cfg_ctx->max_peer_id);
  6347. DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
  6348. soc_cfg_ctx->tx_ring_size);
  6349. DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
  6350. soc_cfg_ctx->tx_comp_ring_size);
  6351. DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
  6352. soc_cfg_ctx->tx_comp_ring_size_nss);
  6353. DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
  6354. soc_cfg_ctx->int_batch_threshold_tx);
  6355. DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
  6356. soc_cfg_ctx->int_timer_threshold_tx);
  6357. DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
  6358. soc_cfg_ctx->int_batch_threshold_rx);
  6359. DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
  6360. soc_cfg_ctx->int_timer_threshold_rx);
  6361. DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
  6362. soc_cfg_ctx->int_batch_threshold_other);
  6363. DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
  6364. soc_cfg_ctx->int_timer_threshold_other);
  6365. for (i = 0; i < num_of_int_contexts; i++) {
  6366. index += qdf_snprint(&ring_mask[index],
  6367. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6368. " %d",
  6369. soc_cfg_ctx->int_tx_ring_mask[i]);
  6370. }
  6371. DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
  6372. num_of_int_contexts, ring_mask);
  6373. index = 0;
  6374. for (i = 0; i < num_of_int_contexts; i++) {
  6375. index += qdf_snprint(&ring_mask[index],
  6376. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6377. " %d",
  6378. soc_cfg_ctx->int_rx_ring_mask[i]);
  6379. }
  6380. DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
  6381. num_of_int_contexts, ring_mask);
  6382. index = 0;
  6383. for (i = 0; i < num_of_int_contexts; i++) {
  6384. index += qdf_snprint(&ring_mask[index],
  6385. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6386. " %d",
  6387. soc_cfg_ctx->int_rx_mon_ring_mask[i]);
  6388. }
  6389. DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
  6390. num_of_int_contexts, ring_mask);
  6391. index = 0;
  6392. for (i = 0; i < num_of_int_contexts; i++) {
  6393. index += qdf_snprint(&ring_mask[index],
  6394. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6395. " %d",
  6396. soc_cfg_ctx->int_rx_err_ring_mask[i]);
  6397. }
  6398. DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
  6399. num_of_int_contexts, ring_mask);
  6400. index = 0;
  6401. for (i = 0; i < num_of_int_contexts; i++) {
  6402. index += qdf_snprint(&ring_mask[index],
  6403. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6404. " %d",
  6405. soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
  6406. }
  6407. DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
  6408. num_of_int_contexts, ring_mask);
  6409. index = 0;
  6410. for (i = 0; i < num_of_int_contexts; i++) {
  6411. index += qdf_snprint(&ring_mask[index],
  6412. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6413. " %d",
  6414. soc_cfg_ctx->int_reo_status_ring_mask[i]);
  6415. }
  6416. DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
  6417. num_of_int_contexts, ring_mask);
  6418. index = 0;
  6419. for (i = 0; i < num_of_int_contexts; i++) {
  6420. index += qdf_snprint(&ring_mask[index],
  6421. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6422. " %d",
  6423. soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
  6424. }
  6425. DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
  6426. num_of_int_contexts, ring_mask);
  6427. index = 0;
  6428. for (i = 0; i < num_of_int_contexts; i++) {
  6429. index += qdf_snprint(&ring_mask[index],
  6430. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6431. " %d",
  6432. soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
  6433. }
  6434. DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
  6435. num_of_int_contexts, ring_mask);
  6436. DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
  6437. soc_cfg_ctx->rx_hash);
  6438. DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
  6439. soc_cfg_ctx->tso_enabled);
  6440. DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
  6441. soc_cfg_ctx->lro_enabled);
  6442. DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
  6443. soc_cfg_ctx->sg_enabled);
  6444. DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
  6445. soc_cfg_ctx->gro_enabled);
  6446. DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
  6447. soc_cfg_ctx->rawmode_enabled);
  6448. DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
  6449. soc_cfg_ctx->peer_flow_ctrl_enabled);
  6450. DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
  6451. soc_cfg_ctx->napi_enabled);
  6452. DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
  6453. soc_cfg_ctx->tcp_udp_checksumoffload);
  6454. DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
  6455. soc_cfg_ctx->defrag_timeout_check);
  6456. DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
  6457. soc_cfg_ctx->rx_defrag_min_timeout);
  6458. DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
  6459. soc_cfg_ctx->wbm_release_ring);
  6460. DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
  6461. soc_cfg_ctx->tcl_cmd_ring);
  6462. DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
  6463. soc_cfg_ctx->tcl_status_ring);
  6464. DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
  6465. soc_cfg_ctx->reo_reinject_ring);
  6466. DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
  6467. soc_cfg_ctx->rx_release_ring);
  6468. DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
  6469. soc_cfg_ctx->reo_exception_ring);
  6470. DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
  6471. soc_cfg_ctx->reo_cmd_ring);
  6472. DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
  6473. soc_cfg_ctx->reo_status_ring);
  6474. DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
  6475. soc_cfg_ctx->rxdma_refill_ring);
  6476. DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
  6477. soc_cfg_ctx->rxdma_err_dst_ring);
  6478. }
  6479. /**
6480. * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
6481. * @pdev: DP pdev handle
6482. *
6483. * Return: void
  6484. */
  6485. static void
  6486. dp_print_pdev_cfg_params(struct dp_pdev *pdev)
  6487. {
  6488. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  6489. if (!pdev) {
  6490. dp_err("Context is null");
  6491. return;
  6492. }
  6493. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  6494. if (!pdev_cfg_ctx) {
  6495. dp_err("Context is null");
  6496. return;
  6497. }
  6498. DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
  6499. pdev_cfg_ctx->rx_dma_buf_ring_size);
  6500. DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
  6501. pdev_cfg_ctx->dma_mon_buf_ring_size);
  6502. DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
  6503. pdev_cfg_ctx->dma_mon_dest_ring_size);
  6504. DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
  6505. pdev_cfg_ctx->dma_mon_status_ring_size);
  6506. DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
  6507. pdev_cfg_ctx->rxdma_monitor_desc_ring);
  6508. DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
  6509. pdev_cfg_ctx->num_mac_rings);
  6510. }
  6511. /**
  6512. * dp_txrx_stats_help() - Helper function for Txrx_Stats
  6513. *
  6514. * Return: None
  6515. */
  6516. static void dp_txrx_stats_help(void)
  6517. {
  6518. dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
  6519. dp_info("stats_option:");
  6520. dp_info(" 1 -- HTT Tx Statistics");
  6521. dp_info(" 2 -- HTT Rx Statistics");
  6522. dp_info(" 3 -- HTT Tx HW Queue Statistics");
  6523. dp_info(" 4 -- HTT Tx HW Sched Statistics");
  6524. dp_info(" 5 -- HTT Error Statistics");
  6525. dp_info(" 6 -- HTT TQM Statistics");
  6526. dp_info(" 7 -- HTT TQM CMDQ Statistics");
  6527. dp_info(" 8 -- HTT TX_DE_CMN Statistics");
  6528. dp_info(" 9 -- HTT Tx Rate Statistics");
  6529. dp_info(" 10 -- HTT Rx Rate Statistics");
  6530. dp_info(" 11 -- HTT Peer Statistics");
  6531. dp_info(" 12 -- HTT Tx SelfGen Statistics");
  6532. dp_info(" 13 -- HTT Tx MU HWQ Statistics");
  6533. dp_info(" 14 -- HTT RING_IF_INFO Statistics");
  6534. dp_info(" 15 -- HTT SRNG Statistics");
  6535. dp_info(" 16 -- HTT SFM Info Statistics");
  6536. dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
  6537. dp_info(" 18 -- HTT Peer List Details");
  6538. dp_info(" 20 -- Clear Host Statistics");
  6539. dp_info(" 21 -- Host Rx Rate Statistics");
  6540. dp_info(" 22 -- Host Tx Rate Statistics");
  6541. dp_info(" 23 -- Host Tx Statistics");
  6542. dp_info(" 24 -- Host Rx Statistics");
  6543. dp_info(" 25 -- Host AST Statistics");
  6544. dp_info(" 26 -- Host SRNG PTR Statistics");
  6545. dp_info(" 27 -- Host Mon Statistics");
  6546. dp_info(" 28 -- Host REO Queue Statistics");
  6547. dp_info(" 29 -- Host Soc cfg param Statistics");
  6548. dp_info(" 30 -- Host pdev cfg param Statistics");
  6549. }
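/*
 * Illustrative usage (not part of the driver): given the help text above,
 * a host Tx stats dump on mac 0 would typically be requested from user
 * space as
 *
 *     iwpriv wlan0 txrx_stats 23 0
 *
 * where 23 selects "Host Tx Statistics" and 0 is the mac_id. The interface
 * name "wlan0" is only an example and depends on the platform.
 */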
  6550. /**
6551. * dp_print_host_stats() - Function to print the stats aggregated at host
6552. * @vdev_handle: DP_VDEV handle
6553. * @req: stats request holding the host stats type
6554. *
6555. * Return: 0 on success, prints an error message on failure
  6556. */
  6557. static int
  6558. dp_print_host_stats(struct cdp_vdev *vdev_handle,
  6559. struct cdp_txrx_stats_req *req)
  6560. {
  6561. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6562. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  6563. enum cdp_host_txrx_stats type =
  6564. dp_stats_mapping_table[req->stats][STATS_HOST];
  6565. dp_aggregate_pdev_stats(pdev);
  6566. switch (type) {
  6567. case TXRX_CLEAR_STATS:
  6568. dp_txrx_host_stats_clr(vdev);
  6569. break;
  6570. case TXRX_RX_RATE_STATS:
  6571. dp_print_rx_rates(vdev);
  6572. break;
  6573. case TXRX_TX_RATE_STATS:
  6574. dp_print_tx_rates(vdev);
  6575. break;
  6576. case TXRX_TX_HOST_STATS:
  6577. dp_print_pdev_tx_stats(pdev);
  6578. dp_print_soc_tx_stats(pdev->soc);
  6579. break;
  6580. case TXRX_RX_HOST_STATS:
  6581. dp_print_pdev_rx_stats(pdev);
  6582. dp_print_soc_rx_stats(pdev->soc);
  6583. break;
  6584. case TXRX_AST_STATS:
  6585. dp_print_ast_stats(pdev->soc);
  6586. dp_print_peer_table(vdev);
  6587. break;
  6588. case TXRX_SRNG_PTR_STATS:
  6589. dp_print_ring_stats(pdev);
  6590. break;
  6591. case TXRX_RX_MON_STATS:
  6592. dp_print_pdev_rx_mon_stats(pdev);
  6593. break;
  6594. case TXRX_REO_QUEUE_STATS:
  6595. dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
  6596. break;
  6597. case TXRX_SOC_CFG_PARAMS:
  6598. dp_print_soc_cfg_params(pdev->soc);
  6599. break;
  6600. case TXRX_PDEV_CFG_PARAMS:
  6601. dp_print_pdev_cfg_params(pdev);
  6602. break;
  6603. default:
  6604. dp_info("Wrong Input For TxRx Host Stats");
  6605. dp_txrx_stats_help();
  6606. break;
  6607. }
  6608. return 0;
  6609. }
  6610. /*
  6611. * dp_ppdu_ring_reset()- Reset PPDU Stats ring
  6612. * @pdev: DP_PDEV handle
  6613. *
  6614. * Return: void
  6615. */
  6616. static void
  6617. dp_ppdu_ring_reset(struct dp_pdev *pdev)
  6618. {
  6619. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  6620. int mac_id;
  6621. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  6622. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6623. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6624. pdev->pdev_id);
  6625. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6626. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6627. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6628. }
  6629. }
  6630. /*
  6631. * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
  6632. * @pdev: DP_PDEV handle
  6633. *
  6634. * Return: void
  6635. */
  6636. static void
  6637. dp_ppdu_ring_cfg(struct dp_pdev *pdev)
  6638. {
  6639. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  6640. int mac_id;
  6641. htt_tlv_filter.mpdu_start = 1;
  6642. htt_tlv_filter.msdu_start = 0;
  6643. htt_tlv_filter.packet = 0;
  6644. htt_tlv_filter.msdu_end = 0;
  6645. htt_tlv_filter.mpdu_end = 0;
  6646. htt_tlv_filter.attention = 0;
  6647. htt_tlv_filter.ppdu_start = 1;
  6648. htt_tlv_filter.ppdu_end = 1;
  6649. htt_tlv_filter.ppdu_end_user_stats = 1;
  6650. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  6651. htt_tlv_filter.ppdu_end_status_done = 1;
  6652. htt_tlv_filter.enable_fp = 1;
  6653. htt_tlv_filter.enable_md = 0;
  6654. if (pdev->neighbour_peers_added &&
  6655. pdev->soc->hw_nac_monitor_support) {
  6656. htt_tlv_filter.enable_md = 1;
  6657. htt_tlv_filter.packet_header = 1;
  6658. }
  6659. if (pdev->mcopy_mode) {
  6660. htt_tlv_filter.packet_header = 1;
  6661. htt_tlv_filter.enable_mo = 1;
  6662. }
  6663. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  6664. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  6665. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  6666. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  6667. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  6668. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  6669. if (pdev->neighbour_peers_added &&
  6670. pdev->soc->hw_nac_monitor_support)
  6671. htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
  6672. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6673. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6674. pdev->pdev_id);
  6675. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6676. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6677. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6678. }
  6679. }
  6680. /*
6681. * is_ppdu_txrx_capture_enabled() - API to check whether any of the PPDU
6682. * capture modes (pktlog PPDU stats, tx sniffer, mcopy) is enabled.
6683. * @pdev: dp pdev handle.
6684. *
6685. * Return: true if none of the capture modes is enabled, false otherwise
  6686. */
  6687. static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
  6688. {
  6689. if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
  6690. !pdev->mcopy_mode)
  6691. return true;
  6692. else
  6693. return false;
  6694. }
  6695. /*
6696. * dp_set_bpr_enable() - API to enable/disable bpr feature
6697. * @pdev_handle: DP_PDEV handle.
6698. * @val: Provided value.
6699. *
6700. * Return: 0 for success. nonzero for failure.
  6701. */
  6702. static QDF_STATUS
  6703. dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
  6704. {
  6705. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6706. switch (val) {
  6707. case CDP_BPR_DISABLE:
  6708. pdev->bpr_enable = CDP_BPR_DISABLE;
  6709. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6710. !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
  6711. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6712. } else if (pdev->enhanced_stats_en &&
  6713. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6714. !pdev->pktlog_ppdu_stats) {
  6715. dp_h2t_cfg_stats_msg_send(pdev,
  6716. DP_PPDU_STATS_CFG_ENH_STATS,
  6717. pdev->pdev_id);
  6718. }
  6719. break;
  6720. case CDP_BPR_ENABLE:
  6721. pdev->bpr_enable = CDP_BPR_ENABLE;
  6722. if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
  6723. !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
  6724. dp_h2t_cfg_stats_msg_send(pdev,
  6725. DP_PPDU_STATS_CFG_BPR,
  6726. pdev->pdev_id);
  6727. } else if (pdev->enhanced_stats_en &&
  6728. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6729. !pdev->pktlog_ppdu_stats) {
  6730. dp_h2t_cfg_stats_msg_send(pdev,
  6731. DP_PPDU_STATS_CFG_BPR_ENH,
  6732. pdev->pdev_id);
  6733. } else if (pdev->pktlog_ppdu_stats) {
  6734. dp_h2t_cfg_stats_msg_send(pdev,
  6735. DP_PPDU_STATS_CFG_BPR_PKTLOG,
  6736. pdev->pdev_id);
  6737. }
  6738. break;
  6739. default:
  6740. break;
  6741. }
  6742. return QDF_STATUS_SUCCESS;
  6743. }
  6744. /*
  6745. * dp_config_debug_sniffer()- API to enable/disable debug sniffer
  6746. * @pdev_handle: DP_PDEV handle
  6747. * @val: user provided value
  6748. *
  6749. * Return: 0 for success. nonzero for failure.
  6750. */
  6751. static QDF_STATUS
  6752. dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
  6753. {
  6754. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6755. QDF_STATUS status = QDF_STATUS_SUCCESS;
  6756. if (pdev->mcopy_mode)
  6757. dp_reset_monitor_mode(pdev_handle);
  6758. switch (val) {
  6759. case 0:
  6760. pdev->tx_sniffer_enable = 0;
  6761. pdev->mcopy_mode = 0;
  6762. pdev->monitor_configured = false;
  6763. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6764. !pdev->bpr_enable) {
  6765. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6766. dp_ppdu_ring_reset(pdev);
  6767. } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
  6768. dp_h2t_cfg_stats_msg_send(pdev,
  6769. DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6770. } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
  6771. dp_h2t_cfg_stats_msg_send(pdev,
  6772. DP_PPDU_STATS_CFG_BPR_ENH,
  6773. pdev->pdev_id);
  6774. } else {
  6775. dp_h2t_cfg_stats_msg_send(pdev,
  6776. DP_PPDU_STATS_CFG_BPR,
  6777. pdev->pdev_id);
  6778. }
  6779. break;
  6780. case 1:
  6781. pdev->tx_sniffer_enable = 1;
  6782. pdev->mcopy_mode = 0;
  6783. pdev->monitor_configured = false;
  6784. if (!pdev->pktlog_ppdu_stats)
  6785. dp_h2t_cfg_stats_msg_send(pdev,
  6786. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6787. break;
  6788. case 2:
  6789. if (pdev->monitor_vdev) {
  6790. status = QDF_STATUS_E_RESOURCES;
  6791. break;
  6792. }
  6793. pdev->mcopy_mode = 1;
  6794. dp_pdev_configure_monitor_rings(pdev);
  6795. pdev->monitor_configured = true;
  6796. pdev->tx_sniffer_enable = 0;
  6797. if (!pdev->pktlog_ppdu_stats)
  6798. dp_h2t_cfg_stats_msg_send(pdev,
  6799. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6800. break;
  6801. default:
  6802. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6803. "Invalid value");
  6804. break;
  6805. }
  6806. return status;
  6807. }
  6808. /*
6809. * dp_enable_enhanced_stats() - API to enable enhanced statistics
  6810. * @pdev_handle: DP_PDEV handle
  6811. *
  6812. * Return: void
  6813. */
  6814. static void
  6815. dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6816. {
  6817. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6818. if (pdev->enhanced_stats_en == 0)
  6819. dp_cal_client_timer_start(pdev->cal_client_ctx);
  6820. pdev->enhanced_stats_en = 1;
  6821. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6822. !pdev->monitor_vdev)
  6823. dp_ppdu_ring_cfg(pdev);
  6824. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6825. dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6826. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6827. dp_h2t_cfg_stats_msg_send(pdev,
  6828. DP_PPDU_STATS_CFG_BPR_ENH,
  6829. pdev->pdev_id);
  6830. }
  6831. }
  6832. /*
6833. * dp_disable_enhanced_stats() - API to disable enhanced statistics
  6834. * @pdev_handle: DP_PDEV handle
  6835. *
  6836. * Return: void
  6837. */
  6838. static void
  6839. dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6840. {
  6841. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6842. if (pdev->enhanced_stats_en == 1)
  6843. dp_cal_client_timer_stop(pdev->cal_client_ctx);
  6844. pdev->enhanced_stats_en = 0;
  6845. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6846. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6847. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6848. dp_h2t_cfg_stats_msg_send(pdev,
  6849. DP_PPDU_STATS_CFG_BPR,
  6850. pdev->pdev_id);
  6851. }
  6852. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6853. !pdev->monitor_vdev)
  6854. dp_ppdu_ring_reset(pdev);
  6855. }
  6856. /*
  6857. * dp_get_fw_peer_stats()- function to print peer stats
  6858. * @pdev_handle: DP_PDEV handle
  6859. * @mac_addr: mac address of the peer
  6860. * @cap: Type of htt stats requested
  6861. * @is_wait: if set, wait on completion from firmware response
  6862. *
6863. * Currently supporting only MAC ID based requests:
  6864. * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
  6865. * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
  6866. * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
  6867. *
  6868. * Return: void
  6869. */
  6870. static void
  6871. dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
  6872. uint32_t cap, uint32_t is_wait)
  6873. {
  6874. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6875. int i;
  6876. uint32_t config_param0 = 0;
  6877. uint32_t config_param1 = 0;
  6878. uint32_t config_param2 = 0;
  6879. uint32_t config_param3 = 0;
  6880. HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
  6881. config_param0 |= (1 << (cap + 1));
  6882. for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
  6883. config_param1 |= (1 << i);
  6884. }
  6885. config_param2 |= (mac_addr[0] & 0x000000ff);
  6886. config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
  6887. config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
  6888. config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
  6889. config_param3 |= (mac_addr[4] & 0x000000ff);
  6890. config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
  6891. if (is_wait) {
  6892. qdf_event_reset(&pdev->fw_peer_stats_event);
  6893. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6894. config_param0, config_param1,
  6895. config_param2, config_param3,
  6896. 0, 1, 0);
  6897. qdf_wait_single_event(&pdev->fw_peer_stats_event,
  6898. DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
  6899. } else {
  6900. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6901. config_param0, config_param1,
  6902. config_param2, config_param3,
  6903. 0, 0, 0);
  6904. }
  6905. }
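/*
 * Worked example (illustrative only): for a peer MAC of 00:11:22:33:44:55
 * the byte packing above yields
 *     config_param2 = 0x33221100   (mac_addr[0..3] in the low to high bytes)
 *     config_param3 = 0x00005544   (mac_addr[4..5] in the low 16 bits)
 * while HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET marks the request as
 * MAC-address based and (1 << (cap + 1)) selects the requested query mode.
 */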
6906. /* This struct definition will be removed from here
6907. * once it gets added to the FW headers */
  6908. struct httstats_cmd_req {
  6909. uint32_t config_param0;
  6910. uint32_t config_param1;
  6911. uint32_t config_param2;
  6912. uint32_t config_param3;
  6913. int cookie;
  6914. u_int8_t stats_id;
  6915. };
  6916. /*
6917. * dp_get_htt_stats: function to process the httstats request
6918. * @pdev_handle: DP pdev handle
6919. * @data: pointer to request data
6920. * @data_len: length of request data
6921. *
6922. * return: void
  6923. */
  6924. static void
  6925. dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
  6926. {
  6927. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6928. struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
  6929. QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
  6930. dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
  6931. req->config_param0, req->config_param1,
  6932. req->config_param2, req->config_param3,
  6933. req->cookie, 0, 0);
  6934. }
  6935. /*
  6936. * dp_set_pdev_param: function to set parameters in pdev
  6937. * @pdev_handle: DP pdev handle
  6938. * @param: parameter type to be set
  6939. * @val: value of parameter to be set
  6940. *
  6941. * Return: 0 for success. nonzero for failure.
  6942. */
  6943. static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
  6944. enum cdp_pdev_param_type param,
  6945. uint8_t val)
  6946. {
  6947. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6948. switch (param) {
  6949. case CDP_CONFIG_DEBUG_SNIFFER:
  6950. return dp_config_debug_sniffer(pdev_handle, val);
  6951. case CDP_CONFIG_BPR_ENABLE:
  6952. return dp_set_bpr_enable(pdev_handle, val);
  6953. case CDP_CONFIG_PRIMARY_RADIO:
  6954. pdev->is_primary = val;
  6955. break;
  6956. default:
  6957. return QDF_STATUS_E_INVAL;
  6958. }
  6959. return QDF_STATUS_SUCCESS;
  6960. }
  6961. /*
6962. * dp_get_vdev_param: function to get the value of a vdev parameter
6963. * @vdev_handle: DP vdev handle
6964. * @param: parameter type to get value for
6965. * return: parameter value
  6966. */
  6967. static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
  6968. enum cdp_vdev_param_type param)
  6969. {
  6970. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6971. uint32_t val;
  6972. switch (param) {
  6973. case CDP_ENABLE_WDS:
  6974. val = vdev->wds_enabled;
  6975. break;
  6976. case CDP_ENABLE_MEC:
  6977. val = vdev->mec_enabled;
  6978. break;
  6979. case CDP_ENABLE_DA_WAR:
  6980. val = vdev->pdev->soc->da_war_enabled;
  6981. break;
  6982. default:
  6983. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6984. "param value %d is wrong\n",
  6985. param);
  6986. val = -1;
  6987. break;
  6988. }
  6989. return val;
  6990. }
  6991. /*
6992. * dp_set_vdev_param: function to set parameters in vdev
6993. * @vdev_handle: DP vdev handle
6994. * @param: parameter type to be set
6995. * @val: value of parameter to be set
6996. * return: void
  6997. */
  6998. static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
  6999. enum cdp_vdev_param_type param, uint32_t val)
  7000. {
  7001. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7002. switch (param) {
  7003. case CDP_ENABLE_WDS:
  7004. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7005. "wds_enable %d for vdev(%p) id(%d)\n",
  7006. val, vdev, vdev->vdev_id);
  7007. vdev->wds_enabled = val;
  7008. break;
  7009. case CDP_ENABLE_MEC:
  7010. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7011. "mec_enable %d for vdev(%p) id(%d)\n",
  7012. val, vdev, vdev->vdev_id);
  7013. vdev->mec_enabled = val;
  7014. break;
  7015. case CDP_ENABLE_DA_WAR:
  7016. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7017. "da_war_enable %d for vdev(%p) id(%d)\n",
  7018. val, vdev, vdev->vdev_id);
  7019. vdev->pdev->soc->da_war_enabled = val;
  7020. dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
  7021. vdev->pdev->soc));
  7022. break;
  7023. case CDP_ENABLE_NAWDS:
  7024. vdev->nawds_enabled = val;
  7025. break;
  7026. case CDP_ENABLE_MCAST_EN:
  7027. vdev->mcast_enhancement_en = val;
  7028. break;
  7029. case CDP_ENABLE_PROXYSTA:
  7030. vdev->proxysta_vdev = val;
  7031. break;
  7032. case CDP_UPDATE_TDLS_FLAGS:
  7033. vdev->tdls_link_connected = val;
  7034. break;
  7035. case CDP_CFG_WDS_AGING_TIMER:
  7036. if (val == 0)
  7037. qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
  7038. else if (val != vdev->wds_aging_timer_val)
  7039. qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
  7040. vdev->wds_aging_timer_val = val;
  7041. break;
  7042. case CDP_ENABLE_AP_BRIDGE:
  7043. if (wlan_op_mode_sta != vdev->opmode)
  7044. vdev->ap_bridge_enabled = val;
  7045. else
  7046. vdev->ap_bridge_enabled = false;
  7047. break;
  7048. case CDP_ENABLE_CIPHER:
  7049. vdev->sec_type = val;
  7050. break;
  7051. case CDP_ENABLE_QWRAP_ISOLATION:
  7052. vdev->isolation_vdev = val;
  7053. break;
  7054. default:
  7055. break;
  7056. }
  7057. dp_tx_vdev_update_search_flags(vdev);
  7058. }
  7059. /**
  7060. * dp_peer_set_nawds: set nawds bit in peer
  7061. * @peer_handle: pointer to peer
  7062. * @value: enable/disable nawds
  7063. *
  7064. * return: void
  7065. */
  7066. static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
  7067. {
  7068. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7069. peer->nawds_enabled = value;
  7070. }
  7071. /*
  7072. * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
  7073. * @vdev_handle: DP_VDEV handle
  7074. * @map_id:ID of map that needs to be updated
  7075. *
  7076. * Return: void
  7077. */
  7078. static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
  7079. uint8_t map_id)
  7080. {
  7081. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7082. vdev->dscp_tid_map_id = map_id;
  7083. return;
  7084. }
  7085. /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
7086. * @pdev_handle: DP pdev handle
  7087. *
  7088. * return : cdp_pdev_stats pointer
  7089. */
  7090. static struct cdp_pdev_stats*
  7091. dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
  7092. {
  7093. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7094. dp_aggregate_pdev_stats(pdev);
  7095. return &pdev->stats;
  7096. }
7097. /* dp_txrx_get_peer_stats - returns the cdp_peer_stats for a peer
  7098. * @peer_handle: DP_PEER handle
  7099. *
  7100. * return : cdp_peer_stats pointer
  7101. */
  7102. static struct cdp_peer_stats*
  7103. dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
  7104. {
  7105. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7106. qdf_assert(peer);
  7107. return &peer->stats;
  7108. }
  7109. /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
  7110. * @peer_handle: DP_PEER handle
  7111. *
  7112. * return : void
  7113. */
  7114. static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
  7115. {
  7116. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7117. qdf_assert(peer);
  7118. qdf_mem_zero(&peer->stats, sizeof(peer->stats));
  7119. }
  7120. /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
  7121. * @vdev_handle: DP_VDEV handle
7122. * @buf: buffer for vdev stats
7123. * @is_aggregate: if true, aggregate per-peer stats; else copy vdev stats only
7124. * return : int (0 on success, 1 on invalid vdev/pdev)
  7125. */
  7126. static int dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
  7127. bool is_aggregate)
  7128. {
  7129. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7130. struct cdp_vdev_stats *vdev_stats;
  7131. struct dp_pdev *pdev;
  7132. struct dp_soc *soc;
  7133. if (!vdev)
  7134. return 1;
  7135. pdev = vdev->pdev;
  7136. if (!pdev)
  7137. return 1;
  7138. soc = pdev->soc;
  7139. vdev_stats = (struct cdp_vdev_stats *)buf;
  7140. if (is_aggregate) {
  7141. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  7142. dp_aggregate_vdev_stats(vdev, buf);
  7143. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  7144. } else {
  7145. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  7146. }
  7147. return 0;
  7148. }
  7149. /*
7150. * dp_get_total_per(): get total packet error rate (PER)
  7151. * @pdev_handle: DP_PDEV handle
  7152. *
  7153. * Return: % error rate using retries per packet and success packets
  7154. */
  7155. static int dp_get_total_per(struct cdp_pdev *pdev_handle)
  7156. {
  7157. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7158. dp_aggregate_pdev_stats(pdev);
  7159. if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
  7160. return 0;
  7161. return ((pdev->stats.tx.retries * 100) /
  7162. ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
  7163. }
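/*
 * Worked example (illustrative only): with 900 successfully transmitted
 * packets and 100 retries, the function above returns
 *     (100 * 100) / (900 + 100) = 10
 * i.e. a 10% packet error rate; when both counters are zero it returns 0
 * to avoid a divide-by-zero.
 */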
  7164. /*
  7165. * dp_txrx_stats_publish(): publish pdev stats into a buffer
  7166. * @pdev_handle: DP_PDEV handle
  7167. * @buf: to hold pdev_stats
  7168. *
  7169. * Return: int
  7170. */
  7171. static int
  7172. dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
  7173. {
  7174. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7175. struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
  7176. struct cdp_txrx_stats_req req = {0,};
  7177. dp_aggregate_pdev_stats(pdev);
  7178. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
  7179. req.cookie_val = 1;
  7180. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7181. req.param1, req.param2, req.param3, 0,
  7182. req.cookie_val, 0);
  7183. msleep(DP_MAX_SLEEP_TIME);
  7184. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
  7185. req.cookie_val = 1;
  7186. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7187. req.param1, req.param2, req.param3, 0,
  7188. req.cookie_val, 0);
  7189. msleep(DP_MAX_SLEEP_TIME);
  7190. qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
  7191. return TXRX_STATS_LEVEL;
  7192. }
  7193. /**
  7194. * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
7195. * @pdev_handle: DP_PDEV handle
  7196. * @map_id: ID of map that needs to be updated
  7197. * @tos: index value in map
  7198. * @tid: tid value passed by the user
  7199. *
  7200. * Return: void
  7201. */
  7202. static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
  7203. uint8_t map_id, uint8_t tos, uint8_t tid)
  7204. {
  7205. uint8_t dscp;
  7206. struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
  7207. struct dp_soc *soc = pdev->soc;
  7208. if (!soc)
  7209. return;
  7210. dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  7211. pdev->dscp_tid_map[map_id][dscp] = tid;
  7212. if (map_id < soc->num_hw_dscp_tid_map)
  7213. hal_tx_update_dscp_tid(soc->hal_soc, tid,
  7214. map_id, dscp);
  7215. return;
  7216. }
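/*
 * Worked example (illustrative only), assuming DP_IP_DSCP_SHIFT is 2 and
 * DP_IP_DSCP_MASK is 0x3f (DSCP being the upper six bits of the IP TOS
 * byte): a tos of 0xb8 (EF traffic) gives
 *     dscp = (0xb8 >> 2) & 0x3f = 46
 * so dscp_tid_map[map_id][46] is updated with the requested tid, and the
 * HW copy is refreshed only when map_id refers to a HW-backed DSCP-TID map.
 */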
  7217. /**
  7218. * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
  7219. * @pdev_handle: pdev handle
  7220. * @val: hmmc-dscp flag value
  7221. *
  7222. * Return: void
  7223. */
  7224. static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
  7225. bool val)
  7226. {
  7227. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7228. pdev->hmmc_tid_override_en = val;
  7229. }
  7230. /**
  7231. * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
  7232. * @pdev_handle: pdev handle
  7233. * @tid: tid value
  7234. *
  7235. * Return: void
  7236. */
  7237. static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
  7238. uint8_t tid)
  7239. {
  7240. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7241. pdev->hmmc_tid = tid;
  7242. }
  7243. /**
  7244. * dp_fw_stats_process(): Process TxRX FW stats request
  7245. * @vdev_handle: DP VDEV handle
  7246. * @req: stats request
  7247. *
  7248. * return: int
  7249. */
  7250. static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
  7251. struct cdp_txrx_stats_req *req)
  7252. {
  7253. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7254. struct dp_pdev *pdev = NULL;
  7255. uint32_t stats = req->stats;
  7256. uint8_t mac_id = req->mac_id;
  7257. if (!vdev) {
  7258. DP_TRACE(NONE, "VDEV not found");
  7259. return 1;
  7260. }
  7261. pdev = vdev->pdev;
  7262. /*
7263. * For the HTT_DBG_EXT_STATS_RESET command, the FW expects config
7264. * params 0 through 3 to be set according to the rule below:
  7265. *
  7266. * PARAM:
  7267. * - config_param0 : start_offset (stats type)
  7268. * - config_param1 : stats bmask from start offset
  7269. * - config_param2 : stats bmask from start offset + 32
  7270. * - config_param3 : stats bmask from start offset + 64
  7271. */
  7272. if (req->stats == CDP_TXRX_STATS_0) {
  7273. req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
  7274. req->param1 = 0xFFFFFFFF;
  7275. req->param2 = 0xFFFFFFFF;
  7276. req->param3 = 0xFFFFFFFF;
  7277. } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
  7278. req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
  7279. }
  7280. return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
  7281. req->param1, req->param2, req->param3,
  7282. 0, 0, mac_id);
  7283. }
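/*
 * Illustrative note: for CDP_TXRX_STATS_0 above, param0 carries the start
 * offset (HTT_DBG_EXT_STATS_PDEV_TX) and params 1-3 are all-ones bitmasks,
 * selecting every stats type in the three 32-bit windows after the offset.
 * A narrower (hypothetical) request could instead set, for example,
 *     req->param1 = (1 << 0) | (1 << 3);
 * to pick only the first and fourth stats types following the offset.
 */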
  7284. /**
  7285. * dp_txrx_stats_request - function to map to firmware and host stats
  7286. * @vdev: virtual handle
  7287. * @req: stats request
  7288. *
  7289. * Return: QDF_STATUS
  7290. */
  7291. static
  7292. QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
  7293. struct cdp_txrx_stats_req *req)
  7294. {
  7295. int host_stats;
  7296. int fw_stats;
  7297. enum cdp_stats stats;
  7298. int num_stats;
  7299. if (!vdev || !req) {
  7300. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7301. "Invalid vdev/req instance");
  7302. return QDF_STATUS_E_INVAL;
  7303. }
  7304. stats = req->stats;
  7305. if (stats >= CDP_TXRX_MAX_STATS)
  7306. return QDF_STATUS_E_INVAL;
  7307. /*
  7308. * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
  7309. * has to be updated if new FW HTT stats added
  7310. */
  7311. if (stats > CDP_TXRX_STATS_HTT_MAX)
  7312. stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
  7313. num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
  7314. if (stats >= num_stats) {
  7315. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7316. "%s: Invalid stats option: %d", __func__, stats);
  7317. return QDF_STATUS_E_INVAL;
  7318. }
  7319. req->stats = stats;
  7320. fw_stats = dp_stats_mapping_table[stats][STATS_FW];
  7321. host_stats = dp_stats_mapping_table[stats][STATS_HOST];
  7322. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7323. "stats: %u fw_stats_type: %d host_stats: %d",
  7324. stats, fw_stats, host_stats);
  7325. if (fw_stats != TXRX_FW_STATS_INVALID) {
  7326. /* update request with FW stats type */
  7327. req->stats = fw_stats;
  7328. return dp_fw_stats_process(vdev, req);
  7329. }
  7330. if ((host_stats != TXRX_HOST_STATS_INVALID) &&
  7331. (host_stats <= TXRX_HOST_STATS_MAX))
  7332. return dp_print_host_stats(vdev, req);
  7333. else
  7334. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7335. "Wrong Input for TxRx Stats");
  7336. return QDF_STATUS_SUCCESS;
  7337. }
  7338. /*
  7339. * dp_print_napi_stats(): NAPI stats
  7340. * @soc - soc handle
  7341. */
  7342. static void dp_print_napi_stats(struct dp_soc *soc)
  7343. {
  7344. hif_print_napi_stats(soc->hif_handle);
  7345. }
  7346. /*
  7347. * dp_print_per_ring_stats(): Packet count per ring
  7348. * @soc - soc handle
  7349. */
  7350. static void dp_print_per_ring_stats(struct dp_soc *soc)
  7351. {
  7352. uint8_t ring;
  7353. uint16_t core;
  7354. uint64_t total_packets;
  7355. DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
  7356. for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
  7357. total_packets = 0;
  7358. DP_TRACE_STATS(INFO_HIGH,
  7359. "Packets on ring %u:", ring);
  7360. for (core = 0; core < NR_CPUS; core++) {
  7361. DP_TRACE_STATS(INFO_HIGH,
  7362. "Packets arriving on core %u: %llu",
  7363. core,
  7364. soc->stats.rx.ring_packets[core][ring]);
  7365. total_packets += soc->stats.rx.ring_packets[core][ring];
  7366. }
  7367. DP_TRACE_STATS(INFO_HIGH,
  7368. "Total packets on ring %u: %llu",
  7369. ring, total_packets);
  7370. }
  7371. }
  7372. /*
  7373. * dp_txrx_path_stats() - Function to display dump stats
  7374. * @soc - soc handle
  7375. *
  7376. * return: none
  7377. */
  7378. static void dp_txrx_path_stats(struct dp_soc *soc)
  7379. {
  7380. uint8_t error_code;
  7381. uint8_t loop_pdev;
  7382. struct dp_pdev *pdev;
  7383. uint8_t i;
  7384. if (!soc) {
  7385. DP_TRACE(ERROR, "%s: Invalid access",
  7386. __func__);
  7387. return;
  7388. }
  7389. for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
  7390. pdev = soc->pdev_list[loop_pdev];
  7391. dp_aggregate_pdev_stats(pdev);
  7392. DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
  7393. DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
  7394. pdev->stats.tx_i.rcvd.num,
  7395. pdev->stats.tx_i.rcvd.bytes);
  7396. DP_TRACE_STATS(INFO_HIGH,
  7397. "processed from host: %u msdus (%llu bytes)",
  7398. pdev->stats.tx_i.processed.num,
  7399. pdev->stats.tx_i.processed.bytes);
  7400. DP_TRACE_STATS(INFO_HIGH,
  7401. "successfully transmitted: %u msdus (%llu bytes)",
  7402. pdev->stats.tx.tx_success.num,
  7403. pdev->stats.tx.tx_success.bytes);
  7404. DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
  7405. DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
  7406. pdev->stats.tx_i.dropped.dropped_pkt.num);
  7407. DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
  7408. pdev->stats.tx_i.dropped.desc_na.num);
  7409. DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
  7410. pdev->stats.tx_i.dropped.ring_full);
  7411. DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
  7412. pdev->stats.tx_i.dropped.enqueue_fail);
  7413. DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
  7414. pdev->stats.tx_i.dropped.dma_error);
  7415. DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
  7416. DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
  7417. pdev->stats.tx.tx_failed);
  7418. DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
  7419. pdev->stats.tx.dropped.age_out);
  7420. DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
  7421. pdev->stats.tx.dropped.fw_rem.num);
  7422. DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
  7423. pdev->stats.tx.dropped.fw_rem.bytes);
  7424. DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
  7425. pdev->stats.tx.dropped.fw_rem_tx);
  7426. DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
  7427. pdev->stats.tx.dropped.fw_rem_notx);
  7428. DP_TRACE_STATS(INFO_HIGH, "Invalid peer on tx path: %u",
  7429. pdev->soc->stats.tx.tx_invalid_peer.num);
  7430. DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
  7431. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7432. pdev->stats.tx_comp_histogram.pkts_1);
  7433. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7434. pdev->stats.tx_comp_histogram.pkts_2_20);
  7435. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7436. pdev->stats.tx_comp_histogram.pkts_21_40);
  7437. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7438. pdev->stats.tx_comp_histogram.pkts_41_60);
  7439. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7440. pdev->stats.tx_comp_histogram.pkts_61_80);
  7441. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7442. pdev->stats.tx_comp_histogram.pkts_81_100);
  7443. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7444. pdev->stats.tx_comp_histogram.pkts_101_200);
  7445. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7446. pdev->stats.tx_comp_histogram.pkts_201_plus);
  7447. DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
  7448. DP_TRACE_STATS(INFO_HIGH,
  7449. "delivered %u msdus ( %llu bytes),",
  7450. pdev->stats.rx.to_stack.num,
  7451. pdev->stats.rx.to_stack.bytes);
  7452. for (i = 0; i < CDP_MAX_RX_RINGS; i++)
  7453. DP_TRACE_STATS(INFO_HIGH,
  7454. "received on reo[%d] %u msdus( %llu bytes),",
  7455. i, pdev->stats.rx.rcvd_reo[i].num,
  7456. pdev->stats.rx.rcvd_reo[i].bytes);
  7457. DP_TRACE_STATS(INFO_HIGH,
  7458. "intra-bss packets %u msdus ( %llu bytes),",
  7459. pdev->stats.rx.intra_bss.pkts.num,
  7460. pdev->stats.rx.intra_bss.pkts.bytes);
  7461. DP_TRACE_STATS(INFO_HIGH,
  7462. "intra-bss fails %u msdus ( %llu bytes),",
  7463. pdev->stats.rx.intra_bss.fail.num,
  7464. pdev->stats.rx.intra_bss.fail.bytes);
  7465. DP_TRACE_STATS(INFO_HIGH,
  7466. "raw packets %u msdus ( %llu bytes),",
  7467. pdev->stats.rx.raw.num,
  7468. pdev->stats.rx.raw.bytes);
  7469. DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
  7470. pdev->stats.rx.err.mic_err);
  7471. DP_TRACE_STATS(INFO_HIGH, "Invalid peer on rx path: %u",
  7472. pdev->soc->stats.rx.err.rx_invalid_peer.num);
  7473. DP_TRACE_STATS(INFO_HIGH, "sw_peer_id invalid %u",
  7474. pdev->soc->stats.rx.err.rx_invalid_peer_id.num);
  7475. DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
  7476. DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
  7477. pdev->soc->stats.rx.err.invalid_rbm);
  7478. DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
  7479. pdev->soc->stats.rx.err.hal_ring_access_fail);
  7480. for (error_code = 0; error_code < HAL_REO_ERR_MAX;
  7481. error_code++) {
  7482. if (!pdev->soc->stats.rx.err.reo_error[error_code])
  7483. continue;
  7484. DP_TRACE_STATS(INFO_HIGH,
  7485. "Reo error number (%u): %u msdus",
  7486. error_code,
  7487. pdev->soc->stats.rx.err
  7488. .reo_error[error_code]);
  7489. }
  7490. for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
  7491. error_code++) {
  7492. if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
  7493. continue;
  7494. DP_TRACE_STATS(INFO_HIGH,
  7495. "Rxdma error number (%u): %u msdus",
  7496. error_code,
  7497. pdev->soc->stats.rx.err
  7498. .rxdma_error[error_code]);
  7499. }
  7500. DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
  7501. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7502. pdev->stats.rx_ind_histogram.pkts_1);
  7503. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7504. pdev->stats.rx_ind_histogram.pkts_2_20);
  7505. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7506. pdev->stats.rx_ind_histogram.pkts_21_40);
  7507. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7508. pdev->stats.rx_ind_histogram.pkts_41_60);
  7509. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7510. pdev->stats.rx_ind_histogram.pkts_61_80);
  7511. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7512. pdev->stats.rx_ind_histogram.pkts_81_100);
  7513. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7514. pdev->stats.rx_ind_histogram.pkts_101_200);
  7515. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7516. pdev->stats.rx_ind_histogram.pkts_201_plus);
  7517. DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
  7518. __func__,
  7519. pdev->soc->wlan_cfg_ctx
  7520. ->tso_enabled,
  7521. pdev->soc->wlan_cfg_ctx
  7522. ->lro_enabled,
  7523. pdev->soc->wlan_cfg_ctx
  7524. ->rx_hash,
  7525. pdev->soc->wlan_cfg_ctx
  7526. ->napi_enabled);
  7527. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7528. DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
  7529. __func__,
  7530. pdev->soc->wlan_cfg_ctx
  7531. ->tx_flow_stop_queue_threshold,
  7532. pdev->soc->wlan_cfg_ctx
  7533. ->tx_flow_start_queue_offset);
  7534. #endif
  7535. }
  7536. }
  7537. /*
7538. * dp_txrx_dump_stats() - Dump statistics for the requested option
7539. * @psoc: soc handle; @value: statistics option; @level: verbosity level
  7540. */
  7541. static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
  7542. enum qdf_stats_verbosity_level level)
  7543. {
  7544. struct dp_soc *soc =
  7545. (struct dp_soc *)psoc;
  7546. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7547. if (!soc) {
  7548. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7549. "%s: soc is NULL", __func__);
  7550. return QDF_STATUS_E_INVAL;
  7551. }
  7552. switch (value) {
  7553. case CDP_TXRX_PATH_STATS:
  7554. dp_txrx_path_stats(soc);
  7555. break;
  7556. case CDP_RX_RING_STATS:
  7557. dp_print_per_ring_stats(soc);
  7558. break;
  7559. case CDP_TXRX_TSO_STATS:
  7560. /* TODO: NOT IMPLEMENTED */
  7561. break;
  7562. case CDP_DUMP_TX_FLOW_POOL_INFO:
  7563. cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
  7564. break;
  7565. case CDP_DP_NAPI_STATS:
  7566. dp_print_napi_stats(soc);
  7567. break;
  7568. case CDP_TXRX_DESC_STATS:
  7569. /* TODO: NOT IMPLEMENTED */
  7570. break;
  7571. default:
  7572. status = QDF_STATUS_E_INVAL;
  7573. break;
  7574. }
  7575. return status;
  7576. }
  7577. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7578. /**
  7579. * dp_update_flow_control_parameters() - API to store datapath
  7580. * config parameters
  7581. * @soc: soc handle
7582. * @params: pointer to cdp config parameters
  7583. *
  7584. * Return: void
  7585. */
  7586. static inline
  7587. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7588. struct cdp_config_params *params)
  7589. {
  7590. soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
  7591. params->tx_flow_stop_queue_threshold;
  7592. soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
  7593. params->tx_flow_start_queue_offset;
  7594. }
  7595. #else
  7596. static inline
  7597. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7598. struct cdp_config_params *params)
  7599. {
  7600. }
  7601. #endif
  7602. /**
  7603. * dp_update_config_parameters() - API to store datapath
  7604. * config parameters
7605. * @psoc: soc handle
7606. * @params: pointer to cdp config parameters
  7607. *
  7608. * Return: status
  7609. */
  7610. static
  7611. QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
  7612. struct cdp_config_params *params)
  7613. {
  7614. struct dp_soc *soc = (struct dp_soc *)psoc;
  7615. if (!(soc)) {
  7616. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7617. "%s: Invalid handle", __func__);
  7618. return QDF_STATUS_E_INVAL;
  7619. }
  7620. soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
  7621. soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
  7622. soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
  7623. soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
  7624. params->tcp_udp_checksumoffload;
  7625. soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
  7626. soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
  7627. soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
  7628. dp_update_flow_control_parameters(soc, params);
  7629. return QDF_STATUS_SUCCESS;
  7630. }
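/*
 * Illustrative usage (not part of the driver): a caller would typically
 * fill a struct cdp_config_params from its ini values and push it through
 * the registered update_config_parameters op, e.g.
 *
 *     struct cdp_config_params params = {0};
 *     params.tso_enable = 1;
 *     params.napi_enable = 1;
 *     params.tcp_udp_checksumoffload = 1;
 *     soc->ops->cmn_drv_ops->update_config_parameters(psoc, &params);
 *
 * Only the fields copied in the function above (plus the flow-control
 * thresholds under QCA_LL_TX_FLOW_CONTROL_V2) take effect; the exact call
 * path shown here is an assumption.
 */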
  7631. /**
7632. * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy on the
7633. * bss peer (AP mode) or the first peer (STA mode)
7634. * @vdev_handle - datapath vdev handle
7635. * @val: WDS rx policy bitmap
7636. *
7637. * Return: void
  7638. */
  7639. #ifdef WDS_VENDOR_EXTENSION
  7640. void
  7641. dp_txrx_set_wds_rx_policy(
  7642. struct cdp_vdev *vdev_handle,
  7643. u_int32_t val)
  7644. {
  7645. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7646. struct dp_peer *peer;
  7647. if (vdev->opmode == wlan_op_mode_ap) {
  7648. /* for ap, set it on bss_peer */
  7649. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  7650. if (peer->bss_peer) {
  7651. peer->wds_ecm.wds_rx_filter = 1;
  7652. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7653. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7654. break;
  7655. }
  7656. }
  7657. } else if (vdev->opmode == wlan_op_mode_sta) {
  7658. peer = TAILQ_FIRST(&vdev->peer_list);
  7659. peer->wds_ecm.wds_rx_filter = 1;
  7660. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7661. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7662. }
  7663. }
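/*
 * Illustrative note: val is treated as a bitmap. Passing
 * (WDS_POLICY_RX_UCAST_4ADDR | WDS_POLICY_RX_MCAST_4ADDR) enables 4-address
 * receive handling for both unicast and multicast on the selected peer,
 * while val = 0 leaves both disabled and only sets the wds_rx_filter flag.
 */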
  7664. /**
  7665. * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
  7666. *
  7667. * @peer_handle - datapath peer handle
  7668. * @wds_tx_ucast: policy for unicast transmission
  7669. * @wds_tx_mcast: policy for multicast transmission
  7670. *
  7671. * Return: void
  7672. */
  7673. void
  7674. dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
  7675. int wds_tx_ucast, int wds_tx_mcast)
  7676. {
  7677. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7678. if (wds_tx_ucast || wds_tx_mcast) {
  7679. peer->wds_enabled = 1;
  7680. peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
  7681. peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
  7682. } else {
  7683. peer->wds_enabled = 0;
  7684. peer->wds_ecm.wds_tx_ucast_4addr = 0;
  7685. peer->wds_ecm.wds_tx_mcast_4addr = 0;
  7686. }
  7687. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7688. FL("Policy Update set to :\
  7689. peer->wds_enabled %d\
  7690. peer->wds_ecm.wds_tx_ucast_4addr %d\
  7691. peer->wds_ecm.wds_tx_mcast_4addr %d"),
  7692. peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
  7693. peer->wds_ecm.wds_tx_mcast_4addr);
  7694. return;
  7695. }
  7696. #endif
  7697. static struct cdp_wds_ops dp_ops_wds = {
  7698. .vdev_set_wds = dp_vdev_set_wds,
  7699. #ifdef WDS_VENDOR_EXTENSION
  7700. .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
  7701. .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
  7702. #endif
  7703. };
  7704. /*
  7705. * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
  7706. * @vdev_handle - datapath vdev handle
  7707. * @callback - callback function
  7708. * @ctxt: callback context
  7709. *
  7710. */
  7711. static void
  7712. dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
  7713. ol_txrx_data_tx_cb callback, void *ctxt)
  7714. {
  7715. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7716. vdev->tx_non_std_data_callback.func = callback;
  7717. vdev->tx_non_std_data_callback.ctxt = ctxt;
  7718. }
  7719. /**
  7720. * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
  7721. * @pdev_hdl: datapath pdev handle
  7722. *
  7723. * Return: opaque pointer to dp txrx handle
  7724. */
  7725. static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
  7726. {
  7727. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7728. return pdev->dp_txrx_handle;
  7729. }
  7730. /**
  7731. * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
  7732. * @pdev_hdl: datapath pdev handle
  7733. * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
  7734. *
  7735. * Return: void
  7736. */
  7737. static void
  7738. dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
  7739. {
  7740. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7741. pdev->dp_txrx_handle = dp_txrx_hdl;
  7742. }
  7743. /**
  7744. * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
  7745. * @soc_handle: datapath soc handle
  7746. *
  7747. * Return: opaque pointer to external dp (non-core DP)
  7748. */
  7749. static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
  7750. {
  7751. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7752. return soc->external_txrx_handle;
  7753. }
  7754. /**
  7755. * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
  7756. * @soc_handle: datapath soc handle
  7757. * @txrx_handle: opaque pointer to external dp (non-core DP)
  7758. *
  7759. * Return: void
  7760. */
  7761. static void
  7762. dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
  7763. {
  7764. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7765. soc->external_txrx_handle = txrx_handle;
  7766. }
  7767. /**
  7768. * dp_get_cfg_capabilities() - get dp capabilities
  7769. * @soc_handle: datapath soc handle
  7770. * @dp_caps: enum for dp capabilities
  7771. *
  7772. * Return: bool to determine if dp caps is enabled
  7773. */
  7774. static bool
  7775. dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
  7776. enum cdp_capabilities dp_caps)
  7777. {
  7778. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7779. return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
  7780. }
  7781. #ifdef FEATURE_AST
  7782. static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  7783. {
  7784. struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
  7785. struct dp_peer *peer = (struct dp_peer *) peer_hdl;
  7786. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7787. /*
  7788. * For BSS peer, new peer is not created on alloc_node if the
7789. * peer with the same address already exists; instead refcnt is
7790. * increased for the existing peer. Correspondingly in the delete path,
7791. * only refcnt is decreased, and the peer is deleted only when all
7792. * references are dropped. So delete_in_progress should not be set
7793. * for bss_peer, unless only 2 references remain (peer map reference
  7794. * and peer hash table reference).
  7795. */
  7796. if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
  7797. return;
  7798. }
  7799. qdf_spin_lock_bh(&soc->ast_lock);
  7800. peer->delete_in_progress = true;
  7801. dp_peer_delete_ast_entries(soc, peer);
  7802. qdf_spin_unlock_bh(&soc->ast_lock);
  7803. }
  7804. #endif
  7805. #ifdef ATH_SUPPORT_NAC_RSSI
  7806. /**
7807. * dp_vdev_get_neighbour_rssi(): Retrieve the stored RSSI for a configured NAC
7808. * @vdev_hdl: DP vdev handle
7809. * @mac_addr: neighbour peer mac address
7810. * @rssi: buffer to return the rssi value
  7811. * Return: 0 for success. nonzero for failure.
  7812. */
  7813. static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
  7814. char *mac_addr,
  7815. uint8_t *rssi)
  7816. {
  7817. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  7818. struct dp_pdev *pdev = vdev->pdev;
  7819. struct dp_neighbour_peer *peer = NULL;
  7820. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  7821. *rssi = 0;
  7822. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  7823. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  7824. neighbour_peer_list_elem) {
  7825. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  7826. mac_addr, DP_MAC_ADDR_LEN) == 0) {
  7827. *rssi = peer->rssi;
  7828. status = QDF_STATUS_SUCCESS;
  7829. break;
  7830. }
  7831. }
  7832. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  7833. return status;
  7834. }
  7835. static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
  7836. enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
  7837. uint8_t chan_num)
  7838. {
  7839. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7840. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  7841. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7842. pdev->nac_rssi_filtering = 1;
  7843. /* Store address of NAC (neighbour peer) which will be checked
  7844. * against TA of received packets.
  7845. */
  7846. if (cmd == CDP_NAC_PARAM_ADD) {
  7847. dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
  7848. client_macaddr);
  7849. } else if (cmd == CDP_NAC_PARAM_DEL) {
  7850. dp_update_filter_neighbour_peers(vdev_handle,
  7851. DP_NAC_PARAM_DEL,
  7852. client_macaddr);
  7853. }
  7854. if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
  7855. soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
  7856. ((void *)vdev->pdev->ctrl_pdev,
  7857. vdev->vdev_id, cmd, bssid);
  7858. return QDF_STATUS_SUCCESS;
  7859. }
  7860. #endif
  7861. /**
  7862. * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
  7863. * for pktlog
7864. * @txrx_pdev_handle: cdp_pdev handle
7865. * @mac_addr: peer mac address; @enb_dsb: enable or disable peer based filtering
  7866. *
  7867. * Return: QDF_STATUS
  7868. */
  7869. static int
  7870. dp_enable_peer_based_pktlog(
  7871. struct cdp_pdev *txrx_pdev_handle,
  7872. char *mac_addr, uint8_t enb_dsb)
  7873. {
  7874. struct dp_peer *peer;
  7875. uint8_t local_id;
  7876. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
  7877. peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
  7878. mac_addr, &local_id);
  7879. if (!peer) {
  7880. dp_err("Invalid Peer");
  7881. return QDF_STATUS_E_FAILURE;
  7882. }
  7883. peer->peer_based_pktlog_filter = enb_dsb;
  7884. pdev->dp_peer_based_pktlog = enb_dsb;
  7885. return QDF_STATUS_SUCCESS;
  7886. }
  7887. static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
  7888. uint32_t max_peers,
  7889. uint32_t max_ast_index,
  7890. bool peer_map_unmap_v2)
  7891. {
  7892. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  7893. soc->max_peers = max_peers;
7894. qdf_print("%s max_peers %u, max_ast_index: %u\n",
  7895. __func__, max_peers, max_ast_index);
  7896. wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
  7897. if (dp_peer_find_attach(soc))
  7898. return QDF_STATUS_E_FAILURE;
  7899. soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
  7900. return QDF_STATUS_SUCCESS;
  7901. }
  7902. /**
  7903. * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
  7904. * @dp_pdev: dp pdev handle
  7905. * @ctrl_pdev: UMAC ctrl pdev handle
  7906. *
  7907. * Return: void
  7908. */
  7909. static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
  7910. struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
  7911. {
  7912. struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
  7913. pdev->ctrl_pdev = ctrl_pdev;
  7914. }
  7915. /*
  7916. * dp_get_cfg() - get dp cfg
  7917. * @soc: cdp soc handle
  7918. * @cfg: cfg enum
  7919. *
  7920. * Return: cfg value
  7921. */
  7922. static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
  7923. {
  7924. struct dp_soc *dpsoc = (struct dp_soc *)soc;
  7925. uint32_t value = 0;
  7926. switch (cfg) {
  7927. case cfg_dp_enable_data_stall:
  7928. value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
  7929. break;
  7930. case cfg_dp_enable_ip_tcp_udp_checksum_offload:
  7931. value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
  7932. break;
  7933. case cfg_dp_tso_enable:
  7934. value = dpsoc->wlan_cfg_ctx->tso_enabled;
  7935. break;
  7936. case cfg_dp_lro_enable:
  7937. value = dpsoc->wlan_cfg_ctx->lro_enabled;
  7938. break;
  7939. case cfg_dp_gro_enable:
  7940. value = dpsoc->wlan_cfg_ctx->gro_enabled;
  7941. break;
  7942. case cfg_dp_tx_flow_start_queue_offset:
  7943. value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
  7944. break;
  7945. case cfg_dp_tx_flow_stop_queue_threshold:
  7946. value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
  7947. break;
  7948. case cfg_dp_disable_intra_bss_fwd:
  7949. value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
  7950. break;
  7951. default:
  7952. value = 0;
  7953. }
  7954. return value;
  7955. }
  7956. static struct cdp_cmn_ops dp_ops_cmn = {
  7957. .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
  7958. .txrx_vdev_attach = dp_vdev_attach_wifi3,
  7959. .txrx_vdev_detach = dp_vdev_detach_wifi3,
  7960. .txrx_pdev_attach = dp_pdev_attach_wifi3,
  7961. .txrx_pdev_detach = dp_pdev_detach_wifi3,
  7962. .txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_vdev_flush_peers = dp_vdev_flush_peers,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
	.txrx_get_vow_config_frm_pdev = NULL,
	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
	.txrx_set_nac = dp_set_nac,
	.txrx_get_tx_pending = dp_get_tx_pending,
	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.tx_send = dp_tx_send,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
	.txrx_get_os_rx_handles_from_vdev =
		dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
};

static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	.txrx_get_sec_type = dp_get_sec_type,
	/* TODO: Add other functions */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
};

static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};

static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
};

static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};

#ifdef CONFIG_WIN
static struct cdp_pflow_ops dp_ops_pflow = {
	/* TODO */
};
#endif /* CONFIG_WIN */

#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
 * @opaque_pdev: DP pdev context
 *
 * DP is ready to runtime suspend if there are no pending TX packets.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	/* Abort if there are any pending TX packets */
	if (dp_get_tx_pending(opaque_pdev) > 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Abort suspend due to pending TX packets"));
		return QDF_STATUS_E_AGAIN;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_runtime_resume() - ensure DP is ready to runtime resume
 * @opaque_pdev: DP pdev context
 *
 * Resume DP for runtime PM.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;
	void *hal_srng;
	int i;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		hal_srng = soc->tcl_data_ring[i].hal_srng;
		if (hal_srng) {
			/* We actually only need to acquire the lock */
			hal_srng_access_start(soc->hal_soc, hal_srng);
			/*
			 * Update SRC ring head pointer for HW to send
			 * all pending packets
			 */
			hal_srng_access_end(soc->hal_soc, hal_srng);
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_RUNTIME_PM */
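
/*
 * Illustrative sketch (assumption, not part of this driver): how a power
 * management caller might gate runtime suspend on dp_runtime_suspend() and
 * only proceed when no TX packets are pending, mirroring the checks above.
 * The helper name and its caller are hypothetical.
 */
#if 0
static QDF_STATUS example_try_runtime_suspend(struct cdp_pdev *pdev)
{
	QDF_STATUS status;

	/* Refuse to suspend while TX packets are still pending */
	status = dp_runtime_suspend(pdev);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* ... bus/HIF runtime suspend would happen here ... */

	return QDF_STATUS_SUCCESS;
}
#endif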

#ifndef CONFIG_WIN
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
};

static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

#ifdef IPA_OFFLOAD
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level
};
#endif

/**
 * dp_bus_suspend() - drain pending TX and prepare DP for bus suspend
 * @opaque_pdev: DP pdev context
 *
 * Waits up to SUSPEND_DRAIN_WAIT ms for pending TX packets to drain and
 * stops the interrupt-poll timer when polling mode is in use.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */

	/* Abort if there are any pending TX packets */
	while (dp_get_tx_pending(opaque_pdev) > 0) {
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			dp_err("TX frames are pending, abort suspend");
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_bus_resume() - resume DP after bus resume
 * @opaque_pdev: DP pdev context
 *
 * Restarts the interrupt-poll timer when polling mode is in use.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	return QDF_STATUS_SUCCESS;
}

static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume
};
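
/*
 * Illustrative sketch (assumption, not part of this driver): a bus-layer
 * caller reaching dp_bus_suspend() through the bus ops table above. The
 * helper below is hypothetical; it only shows the dispatch path and the
 * drain-then-suspend behaviour documented on dp_bus_suspend().
 */
#if 0
static QDF_STATUS example_bus_suspend(ol_txrx_soc_handle soc,
				      struct cdp_pdev *pdev)
{
	/* dp_bus_suspend() waits up to SUSPEND_DRAIN_WAIT ms for TX drain */
	return soc->ops->bus_ops->bus_suspend(pdev);
}
#endif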

static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

/*
 * dp_peer_get_ref_find_by_addr() - find a peer by MAC address and take a
 *				    reference on it
 * @dev: physical device instance
 * @peer_mac_addr: peer MAC address
 * @local_id: local id for the peer
 * @debug_id: to track enum peer access
 *
 * Return: peer instance pointer
 */
static inline void *
dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
			     uint8_t *local_id,
			     enum peer_debug_id_type debug_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
	if (!peer)
		return NULL;

	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	return peer;
}

/*
 * dp_peer_release_ref() - release the peer reference taken by
 *			   dp_peer_get_ref_find_by_addr()
 * @peer: peer handle
 * @debug_id: to track enum peer access
 *
 * Return: None
 */
static inline
void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
{
	dp_peer_unref_delete(peer);
}
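
/*
 * Illustrative sketch (assumption, not part of this driver): the expected
 * pairing of dp_peer_get_ref_find_by_addr() with dp_peer_release_ref(), so
 * the reference taken by the hash lookup is always dropped. The caller and
 * the PEER_DEBUG_ID_EXAMPLE debug id are hypothetical names.
 */
#if 0
static void example_peer_ref_usage(struct cdp_pdev *pdev, uint8_t *mac)
{
	uint8_t local_id;
	void *peer;

	peer = dp_peer_get_ref_find_by_addr(pdev, mac, &local_id,
					    PEER_DEBUG_ID_EXAMPLE);
	if (!peer)
		return;

	/* ... use the peer while the reference is held ... */

	dp_peer_release_ref(peer, PEER_DEBUG_ID_EXAMPLE);
}
#endif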

static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
	.peer_release_ref = dp_peer_release_ref,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
};
#endif

static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
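
/*
 * Illustrative sketch (assumption, not part of this driver): upper layers
 * reach the handlers registered above through the cdp ops tables hanging off
 * the soc handle, e.g. the common ops set in dp_ops_cmn. The wrapper below is
 * hypothetical and only shows the dispatch path.
 */
#if 0
static inline int example_cdp_get_tx_pending(ol_txrx_soc_handle soc,
					     struct cdp_pdev *pdev)
{
	/* Resolves to dp_get_tx_pending() via dp_ops_cmn above */
	return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
}
#endif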

/*
 * dp_soc_set_txrx_ring_map() - set the default TX ring to CPU context map
 * @soc: DP handler for soc
 *
 * Return: Void
 */
static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
{
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
	}
}

#ifdef QCA_WIFI_QCA8074
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);
	if (!dp_soc)
		return NULL;

	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
		return NULL;

	return (void *)dp_soc;
}
#else
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);

	return (void *)dp_soc;
}
#endif
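
/*
 * Illustrative sketch (assumption, not part of this driver): the two-phase
 * SOC lifecycle used above. dp_soc_attach() only allocates and wires up SW
 * state; dp_soc_init() brings the SOC up once the HIF/HAL handles are
 * available, exactly as the first dp_soc_attach_wifi3() variant does. The
 * caller below is hypothetical.
 */
#if 0
static void *example_soc_bringup(void *ctrl_psoc, void *hif_handle,
				 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
				 struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *soc;

	/* Phase 1: allocate the SOC and attach cfg/HTT state */
	soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev, ol_ops,
			    device_id);
	if (!soc)
		return NULL;

	/* Phase 2: initialize once the HIF/HAL handles are available */
	return dp_soc_init(soc, htc_handle, hif_handle);
}
#endif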

/**
 * dp_soc_attach() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id)
{
	int int_ctx;
	struct dp_soc *soc = NULL;
	struct htt_soc *htt_soc = NULL;

	soc = qdf_mem_malloc(sizeof(*soc));
	if (!soc) {
		dp_err("DP SOC memory allocation failed");
		goto fail0;
	}

	int_ctx = 0;
	soc->device_id = device_id;
	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed\n");
		goto fail1;
	}

	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
	if (!htt_soc) {
		dp_err("HTT attach failed");
		goto fail1;
	}

	soc->htt_handle = htt_soc;
	htt_soc->dp_soc = soc;
	htt_soc->htc_soc = htc_handle;

	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	return (void *)soc;

fail2:
	qdf_mem_free(htt_soc);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}

/**
 * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)dpsoc;
	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;

	htt_soc->htc_soc = htc_handle;
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		return NULL;

	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
			   soc->hal_soc, soc->osdev);
	target_type = hal_get_target_type(soc->hal_soc);

	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		break;
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
#endif
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
							CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL)
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
							CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);
	qdf_spinlock_create(&soc->ast_lock);
	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* Fill the tx/rx cpu ring map */
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return soc;
}
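
/*
 * Illustrative sketch (assumption, not part of this driver): the same
 * ol_ops->get_dp_cfg_param() query pattern used in dp_soc_init() above,
 * packaged as a standalone check. CDP_CFG_CCE_DISABLE is reused purely as an
 * example; the helper itself is hypothetical.
 */
#if 0
static bool example_is_cce_disabled(struct dp_soc *soc)
{
	int ret;

	/* The control-plane callback is optional, so guard the dispatch */
	if (!soc->cdp_soc.ol_ops->get_dp_cfg_param)
		return false;

	ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
						    CDP_CFG_CCE_DISABLE);
	return ret == 1;
}
#endif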

/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (Unused)
 * @ol_ops: Offload Operations (Unused)
 * @device_id: Device ID (Unused)
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	return dp_soc_init(dpsoc, htc_handle, hif_handle);
}
#endif

/*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to the MAC id
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return soc->pdev_list[mac_id];

	/* Typically for MCL, as there is only one pdev */
	return soc->pdev_list[0];
}

/*
 * dp_is_hw_dbs_enable() - Check whether HW DBS 2x2 is supported and cap the
 *			   number of MAC rings accordingly
 * @soc: DP SoC context
 * @max_mac_rings: Pointer to the number of MAC rings; reduced to 1 when
 *		   2x2 DBS is not supported
 *
 * Return: None
 */
static
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings)
{
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable(soc->ctrl_psoc);

	*max_mac_rings = (dbs_enable) ? (*max_mac_rings) : 1;
}

/*
 * dp_set_pktlog_wifi3() - enable/disable pktlog configuration on a pdev
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Return: 0 on success
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	soc = pdev->soc;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
							pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
						mac_for_pdev,
						pdev->rxdma_mon_status_ring[mac_id]
						.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
							pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
						mac_for_pdev,
						pdev->rxdma_mon_status_ring[mac_id]
						.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE_PKTLOG_LITE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
							pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
						mac_for_pdev,
						pdev->rxdma_mon_status_ring[mac_id]
						.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/*
			 * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
			 * value 0 is passed here. Once the proper macros are
			 * defined in the HTT header file, they should be used
			 * instead.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
							pdev->pdev_id);

				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}

	return 0;
}
#endif
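
/*
 * Illustrative sketch (assumption, not part of this driver): enabling and
 * later disabling lite RX pktlog through dp_set_pktlog_wifi3() above. The
 * caller is hypothetical; the WDI_EVENT_LITE_RX handling it exercises is the
 * case implemented in the function.
 */
#if 0
static void example_toggle_lite_rx_pktlog(struct dp_pdev *pdev, bool enable)
{
	/*
	 * Configures the RXDMA monitor status ring filters and the reap
	 * timer; a no-op while a monitor vdev is active.
	 */
	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, enable);
}
#endif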