/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "htt_stats.h"
#include "dp_htt.h"
#include "htt_ppdu_stats.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
#include "cfg_ucfg_api.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
#endif
#include "dp_ipa.h"
#include "dp_cal_client_api.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
 * should also be updated accordingly.
 */
QDF_COMPILE_TIME_ASSERT(num_intr_grps,
                        HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);

/*
 * HIF_EVENT_HIST_MAX should always be a power of 2.
 */
QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
                        (HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
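
/*
 * Illustrative sketch (not part of the driver): the power-of-two test
 * used in the assert above. x & (x - 1) clears the lowest set bit, so
 * the result is zero only when at most one bit is set, e.g.
 * 16 & 15 == 0 but 12 & 11 == 8. The helper name is hypothetical.
 */
static inline bool dp_example_is_pow2(uint32_t x)
{
        return x != 0 && (x & (x - 1)) == 0;
}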
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated.
 */
QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
                        WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
                        WLAN_CFG_INT_NUM_CONTEXTS);

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
/*
 * dp_config_enh_rx_capture() - API to enable/disable enhanced rx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user-provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
{
        return QDF_STATUS_E_INVAL;
}
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user-provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
{
        return QDF_STATUS_E_INVAL;
}
#endif /* WLAN_TX_PKT_CAPTURE_ENH */
void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
                  struct hif_opaque_softc *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
              qdf_device_t qdf_osdev,
              struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static inline void *dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
                                         uint8_t vdev_id,
                                         uint8_t *peer_mac_addr);
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
                                       uint8_t *peer_mac, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
                                bool unmap_only);

#ifdef ENABLE_VERBOSE_DEBUG
bool is_dp_verbose_debug_enabled;
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
                          uint8_t pdev_id,
                          bool enable,
                          struct cdp_monitor_filter *filter_val);
#endif

static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
                                            enum hal_ring_type ring_type,
                                            int ring_num);

#define DP_INTR_POLL_TIMER_MS 10
/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS 1000

#define DP_MCS_LENGTH (6 * MAX_MCS)
#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL 0xb
#define RX_RING_MASK_VAL 0x7
#else
#define TX_RING_MASK_VAL 0xF
#define RX_RING_MASK_VAL 0xF
#endif
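
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * each bit in the masks above enables one ring for host interrupt
 * processing. With IPA_OFFLOAD, 0xb (0b1011) leaves TX ring 2 out of
 * the host mask and 0x7 (0b0111) leaves out RX ring 3, so IPA can own
 * those rings.
 */
static inline bool dp_example_ring_is_host_owned(uint32_t mask, int ring)
{
        return !!(mask & (1U << ring));
}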
#define STR_MAXLEN 64

#define RNG_ERR "SRNG setup failed for"

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP     TID
 * 000000    0
 * 001000    1
 * 010000    2
 * 011000    3
 * 100000    4
 * 101000    5
 * 110000    6
 * 111000    7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
        0, 0, 0, 0, 0, 0, 0, 0,
        1, 1, 1, 1, 1, 1, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3,
        4, 4, 4, 4, 4, 4, 4, 4,
        5, 5, 5, 5, 5, 5, 5, 5,
        6, 6, 6, 6, 6, 6, 6, 6,
        7, 7, 7, 7, 7, 7, 7, 7,
};
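
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * how a frame's DSCP codepoint, the upper six bits of the IP TOS byte,
 * would index default_dscp_tid_map to select a TID. The table groups
 * codepoints by their three class-selector bits, so e.g. TOS 0xA0
 * (DSCP 101000) maps to TID 5.
 */
static inline uint8_t dp_example_tos_to_tid(uint8_t ip_tos)
{
        uint8_t dscp = ip_tos >> 2;     /* DSCP is the top 6 bits of TOS */

        return default_dscp_tid_map[dscp];
}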
/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
        0, 1, 2, 3, 4, 5, 6, 7,
};
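
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * PCP is carried in the top three bits of the 802.1Q VLAN TCI field
 * and indexes default_pcp_tid_map one-to-one.
 */
static inline uint8_t dp_example_vlan_tci_to_tid(uint16_t vlan_tci)
{
        return default_pcp_tid_map[(vlan_tci >> 13) & 0x7];
}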
/**
 * @brief CPU to TX ring map
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
        {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
        {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
        {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
        {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
        {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
        {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};
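
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * how dp_cpu_ring_map would be consulted. The row is the NSS-offload
 * ring-map type configured for the soc; the column is the interrupt
 * context (CPU) number; the value is the TX ring to use.
 */
static inline uint8_t dp_example_tx_ring_for_ctx(int map_type, int intr_ctx)
{
        return dp_cpu_ring_map[map_type]
                              [intr_ctx % WLAN_CFG_INT_NUM_CONTEXTS_MAX];
}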
/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
        STATS_FW = 0,
        STATS_HOST = 1,
        STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 */
enum dp_fw_stats {
        TXRX_FW_STATS_INVALID = -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
        {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
        {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
        /* Last ENUM for HTT FW STATS */
        {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
        {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
        {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
        {TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
};
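
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * resolving a stats request through dp_stats_mapping_table. Column
 * STATS_FW holds the HTT firmware stats id and column STATS_HOST the
 * host stats id; the *_INVALID sentinel marks the side that does not
 * apply to a given row.
 */
static inline int dp_example_fw_stats_id(uint32_t req)
{
        if (req >= QDF_ARRAY_SIZE(dp_stats_mapping_table))
                return TXRX_FW_STATS_INVALID;

        return dp_stats_mapping_table[req][STATS_FW];
}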
  286. /* MCL specific functions */
  287. #if defined(DP_CON_MON)
  288. /**
  289. * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
  290. * @soc: pointer to dp_soc handle
  291. * @intr_ctx_num: interrupt context number for which mon mask is needed
  292. *
  293. * For MCL, monitor mode rings are being processed in timer contexts (polled).
  294. * This function is returning 0, since in interrupt mode(softirq based RX),
  295. * we donot want to process monitor mode rings in a softirq.
  296. *
  297. * So, in case packet log is enabled for SAP/STA/P2P modes,
  298. * regular interrupt processing will not process monitor mode rings. It would be
  299. * done in a separate timer context.
  300. *
  301. * Return: 0
  302. */
  303. static inline
  304. uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
  305. {
  306. return 0;
  307. }
  308. /*
  309. * dp_service_mon_rings()- timer to reap monitor rings
  310. * reqd as we are not getting ppdu end interrupts
  311. * @arg: SoC Handle
  312. *
  313. * Return:
  314. *
  315. */
  316. static void dp_service_mon_rings(void *arg)
  317. {
  318. struct dp_soc *soc = (struct dp_soc *)arg;
  319. int ring = 0, work_done, mac_id;
  320. struct dp_pdev *pdev = NULL;
  321. for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
  322. pdev = soc->pdev_list[ring];
  323. if (!pdev)
  324. continue;
  325. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  326. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  327. pdev->pdev_id);
  328. work_done = dp_mon_process(soc, mac_for_pdev,
  329. QCA_NAPI_BUDGET);
  330. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  331. FL("Reaped %d descs from Monitor rings"),
  332. work_done);
  333. }
  334. }
  335. qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
  336. }
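/*
 * A minimal sketch (illustrative only, not part of the driver) of how this
 * reap timer is typically armed, using only the QDF timer calls already
 * visible in this file; the actual init/start sites live elsewhere in
 * dp_main.c and may differ in detail.
 */
#if 0	/* illustrative sketch, assuming the usual QDF timer flow */
static void dp_example_start_mon_reap(struct dp_soc *soc)
{
	/* dp_service_mon_rings() re-arms itself via qdf_timer_mod() above */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
		       dp_service_mon_rings, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
#endif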
  337. #ifndef REMOVE_PKT_LOG
  338. /**
  339. * dp_pkt_log_init() - API to initialize packet log
  340. * @soc_hdl: Datapath soc handle
  341. * @pdev_id: id of data path pdev handle
  342. * @scn: HIF context
  343. *
  344. * Return: none
  345. */
  346. void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
  347. {
  348. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  349. struct dp_pdev *handle =
  350. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  351. if (!handle) {
  352. dp_err("pdev handle is NULL");
  353. return;
  354. }
  355. if (handle->pkt_log_init) {
  356. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  357. "%s: Packet log not initialized", __func__);
  358. return;
  359. }
  360. pktlog_sethandle(&handle->pl_dev, scn);
  361. pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
  362. if (pktlogmod_init(scn)) {
  363. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  364. "%s: pktlogmod_init failed", __func__);
  365. handle->pkt_log_init = false;
  366. } else {
  367. handle->pkt_log_init = true;
  368. }
  369. }
  370. /**
  371. * dp_pkt_log_con_service() - connect packet log service
  372. * @soc_hdl: Datapath soc handle
  373. * @pdev_id: id of data path pdev handle
  374. * @scn: device context
  375. *
  376. * Return: none
  377. */
  378. static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
  379. uint8_t pdev_id, void *scn)
  380. {
  381. dp_pkt_log_init(soc_hdl, pdev_id, scn);
  382. pktlog_htc_attach();
  383. }
  384. /**
  385. * dp_get_num_rx_contexts() - get number of RX contexts
  386. * @soc_hdl: cdp opaque soc handle
  387. *
  388. * Return: number of RX contexts
  389. */
  390. static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
  391. {
  392. int i;
  393. int num_rx_contexts = 0;
  394. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  395. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
  396. if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
  397. num_rx_contexts++;
  398. return num_rx_contexts;
  399. }
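/*
 * Worked example (hypothetical mask values): if the configured contexts
 * carry rx_ring_mask values 0x1, 0x2, 0x0 and 0x4, three of them are
 * non-zero, so dp_get_num_rx_contexts() returns 3.
 */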
  400. /**
  401. * dp_pktlogmod_exit() - API to cleanup pktlog info
  402. * @pdev: Pdev handle
  403. *
  404. * Return: none
  405. */
  406. static void dp_pktlogmod_exit(struct dp_pdev *pdev)
  407. {
  408. struct dp_soc *soc = pdev->soc;
  409. struct hif_opaque_softc *scn = soc->hif_handle;
  410. if (!scn) {
  411. dp_err("Invalid hif(scn) handle");
  412. return;
  413. }
  414. /* stop mon_reap_timer if it has been started */
  415. if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
  416. soc->reap_timer_init)
  417. qdf_timer_sync_cancel(&soc->mon_reap_timer);
  418. pktlogmod_exit(scn);
  419. pdev->pkt_log_init = false;
  420. }
  421. #endif
  422. #else
  423. static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
  424. /**
  425. * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
  426. * @soc: pointer to dp_soc handle
  427. * @intr_ctx_num: interrupt context number for which mon mask is needed
  428. *
  429. * Return: mon mask value
  430. */
  431. static inline
  432. uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
  433. {
  434. return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  435. }
  436. #endif
  437. static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
  438. uint8_t vdev_id,
  439. uint8_t *peer_mac,
  440. uint8_t *mac_addr,
  441. enum cdp_txrx_ast_entry_type type,
  442. uint32_t flags)
  443. {
  444. int ret = -1;
  445. struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
  446. peer_mac, 0, vdev_id);
  447. if (!peer || peer->delete_in_progress) {
  448. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  449. "%s: Peer is NULL!\n", __func__);
  450. goto fail;
  451. }
  452. ret = dp_peer_add_ast((struct dp_soc *)soc_hdl,
  453. peer,
  454. mac_addr,
  455. type,
  456. flags);
  457. fail:
  458. if (peer)
  459. dp_peer_unref_delete(peer);
  460. return ret;
  461. }
  462. static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
  463. uint8_t vdev_id,
  464. uint8_t *peer_mac,
  465. uint8_t *wds_macaddr,
  466. uint32_t flags)
  467. {
  468. int status = -1;
  469. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  470. struct dp_ast_entry *ast_entry = NULL;
  471. struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
  472. peer_mac, 0, vdev_id);
  473. if (!peer || peer->delete_in_progress) {
  474. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  475. "%s: Peer is NULL!\n", __func__);
  476. goto fail;
  477. }
  478. qdf_spin_lock_bh(&soc->ast_lock);
  479. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
  480. peer->vdev->pdev->pdev_id);
  481. if (ast_entry) {
  482. status = dp_peer_update_ast(soc,
  483. peer,
  484. ast_entry, flags);
  485. }
  486. qdf_spin_unlock_bh(&soc->ast_lock);
  487. fail:
  488. if (peer)
  489. dp_peer_unref_delete(peer);
  490. return status;
  491. }
  492. /*
493. * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
494. * @soc_hdl: Datapath SOC handle
495. * @wds_macaddr: WDS entry MAC Address
496. * @peer_mac_addr: peer MAC Address
497. * @vdev_id: id of vdev handle
498. * Return: QDF_STATUS
  499. */
  500. static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
  501. uint8_t *wds_macaddr,
  502. uint8_t *peer_mac_addr,
  503. uint8_t vdev_id)
  504. {
  505. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  506. struct dp_ast_entry *ast_entry = NULL;
  507. struct dp_ast_entry *tmp_ast_entry;
  508. struct dp_peer *peer;
  509. struct dp_pdev *pdev;
  510. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  511. if (!vdev)
  512. return QDF_STATUS_E_FAILURE;
  513. pdev = vdev->pdev;
  514. if (peer_mac_addr) {
  515. peer = dp_peer_find_hash_find(soc, peer_mac_addr,
  516. 0, vdev->vdev_id);
  517. if (!peer) {
  518. return QDF_STATUS_E_FAILURE;
  519. }
  520. if (peer->delete_in_progress) {
  521. dp_peer_unref_delete(peer);
  522. return QDF_STATUS_E_FAILURE;
  523. }
  524. qdf_spin_lock_bh(&soc->ast_lock);
  525. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
  526. if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
  527. (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
  528. dp_peer_del_ast(soc, ast_entry);
  529. }
  530. qdf_spin_unlock_bh(&soc->ast_lock);
  531. dp_peer_unref_delete(peer);
  532. return QDF_STATUS_SUCCESS;
  533. } else if (wds_macaddr) {
  534. qdf_spin_lock_bh(&soc->ast_lock);
  535. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
  536. pdev->pdev_id);
  537. if (ast_entry) {
  538. if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
  539. (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
  540. dp_peer_del_ast(soc, ast_entry);
  541. }
  542. qdf_spin_unlock_bh(&soc->ast_lock);
  543. }
  544. return QDF_STATUS_SUCCESS;
  545. }
  546. /*
547. * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
548. * @soc_hdl: Datapath SOC handle
* @vdev_id: id of vdev handle
  549. *
  550. * Return: QDF_STATUS
  551. */
  552. static QDF_STATUS
  553. dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
  554. uint8_t vdev_id)
  555. {
  556. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  557. struct dp_pdev *pdev;
  558. struct dp_vdev *vdev;
  559. struct dp_peer *peer;
  560. struct dp_ast_entry *ase, *temp_ase;
  561. int i;
  562. qdf_spin_lock_bh(&soc->ast_lock);
  563. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  564. pdev = soc->pdev_list[i];
  565. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  566. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  567. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  568. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  569. if ((ase->type ==
  570. CDP_TXRX_AST_TYPE_WDS_HM) ||
  571. (ase->type ==
  572. CDP_TXRX_AST_TYPE_WDS_HM_SEC))
  573. dp_peer_del_ast(soc, ase);
  574. }
  575. }
  576. }
  577. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  578. }
  579. qdf_spin_unlock_bh(&soc->ast_lock);
  580. return QDF_STATUS_SUCCESS;
  581. }
  582. /*
583. * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
584. * @soc_hdl: Datapath SOC handle
  585. *
  586. * Return: None
  587. */
  588. static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
  589. {
  590. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  591. struct dp_pdev *pdev;
  592. struct dp_vdev *vdev;
  593. struct dp_peer *peer;
  594. struct dp_ast_entry *ase, *temp_ase;
  595. int i;
  596. qdf_spin_lock_bh(&soc->ast_lock);
  597. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  598. pdev = soc->pdev_list[i];
  599. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  600. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  601. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  602. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  603. if ((ase->type ==
  604. CDP_TXRX_AST_TYPE_STATIC) ||
  605. (ase->type ==
  606. CDP_TXRX_AST_TYPE_SELF) ||
  607. (ase->type ==
  608. CDP_TXRX_AST_TYPE_STA_BSS))
  609. continue;
  610. dp_peer_del_ast(soc, ase);
  611. }
  612. }
  613. }
  614. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  615. }
  616. qdf_spin_unlock_bh(&soc->ast_lock);
  617. }
  618. /**
  619. * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
  620. * and return ast entry information
  621. * of first ast entry found in the
  622. * table with given mac address
  623. *
624. * @soc_hdl : data path soc handle
  625. * @ast_mac_addr : AST entry mac address
  626. * @ast_entry_info : ast entry information
  627. *
  628. * return : true if ast entry found with ast_mac_addr
  629. * false if ast entry not found
  630. */
  631. static bool dp_peer_get_ast_info_by_soc_wifi3
  632. (struct cdp_soc_t *soc_hdl,
  633. uint8_t *ast_mac_addr,
  634. struct cdp_ast_entry_info *ast_entry_info)
  635. {
  636. struct dp_ast_entry *ast_entry = NULL;
  637. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  638. qdf_spin_lock_bh(&soc->ast_lock);
  639. ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
  640. if (!ast_entry || !ast_entry->peer) {
  641. qdf_spin_unlock_bh(&soc->ast_lock);
  642. return false;
  643. }
  644. if (ast_entry->delete_in_progress && !ast_entry->callback) {
  645. qdf_spin_unlock_bh(&soc->ast_lock);
  646. return false;
  647. }
  648. ast_entry_info->type = ast_entry->type;
  649. ast_entry_info->pdev_id = ast_entry->pdev_id;
  650. ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
  651. ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
  652. qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
  653. &ast_entry->peer->mac_addr.raw[0],
  654. QDF_MAC_ADDR_SIZE);
  655. qdf_spin_unlock_bh(&soc->ast_lock);
  656. return true;
  657. }
  658. /**
  659. * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
  660. * and return ast entry information
  661. * if mac address and pdev_id matches
  662. *
663. * @soc_hdl : data path soc handle
  664. * @ast_mac_addr : AST entry mac address
  665. * @pdev_id : pdev_id
  666. * @ast_entry_info : ast entry information
  667. *
  668. * return : true if ast entry found with ast_mac_addr
  669. * false if ast entry not found
  670. */
  671. static bool dp_peer_get_ast_info_by_pdevid_wifi3
  672. (struct cdp_soc_t *soc_hdl,
  673. uint8_t *ast_mac_addr,
  674. uint8_t pdev_id,
  675. struct cdp_ast_entry_info *ast_entry_info)
  676. {
  677. struct dp_ast_entry *ast_entry;
  678. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  679. qdf_spin_lock_bh(&soc->ast_lock);
  680. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
  681. if (!ast_entry || !ast_entry->peer) {
  682. qdf_spin_unlock_bh(&soc->ast_lock);
  683. return false;
  684. }
  685. if (ast_entry->delete_in_progress && !ast_entry->callback) {
  686. qdf_spin_unlock_bh(&soc->ast_lock);
  687. return false;
  688. }
  689. ast_entry_info->type = ast_entry->type;
  690. ast_entry_info->pdev_id = ast_entry->pdev_id;
  691. ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
  692. ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
  693. qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
  694. &ast_entry->peer->mac_addr.raw[0],
  695. QDF_MAC_ADDR_SIZE);
  696. qdf_spin_unlock_bh(&soc->ast_lock);
  697. return true;
  698. }
  699. /**
  700. * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
  701. * with given mac address
  702. *
703. * @soc_handle : data path soc handle
704. * @mac_addr : AST entry mac address
705. * @callback : callback function to be called on ast delete response from FW
  706. * @cookie : argument to be passed to callback
  707. *
708. * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
709. * is sent
710. * QDF_STATUS_E_INVAL if ast entry not found
  711. */
  712. static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
  713. uint8_t *mac_addr,
  714. txrx_ast_free_cb callback,
  715. void *cookie)
  716. {
  717. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  718. struct dp_ast_entry *ast_entry = NULL;
  719. txrx_ast_free_cb cb = NULL;
  720. void *arg = NULL;
  721. qdf_spin_lock_bh(&soc->ast_lock);
  722. ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
  723. if (!ast_entry) {
  724. qdf_spin_unlock_bh(&soc->ast_lock);
  725. return -QDF_STATUS_E_INVAL;
  726. }
  727. if (ast_entry->callback) {
  728. cb = ast_entry->callback;
  729. arg = ast_entry->cookie;
  730. }
  731. ast_entry->callback = callback;
  732. ast_entry->cookie = cookie;
733. /*
734. * If delete_in_progress is set, an AST delete was already sent to the
735. * target and the host is waiting for a response; do not send the
736. * delete again.
737. */
  738. if (!ast_entry->delete_in_progress)
  739. dp_peer_del_ast(soc, ast_entry);
  740. qdf_spin_unlock_bh(&soc->ast_lock);
  741. if (cb) {
  742. cb(soc->ctrl_psoc,
  743. dp_soc_to_cdp_soc(soc),
  744. arg,
  745. CDP_TXRX_AST_DELETE_IN_PROGRESS);
  746. }
  747. return QDF_STATUS_SUCCESS;
  748. }
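/*
 * A minimal sketch of a caller-supplied AST free callback, matching the way
 * the callback is invoked above. The function name is hypothetical and the
 * parameter types are inferred from the call site, so treat this as an
 * illustration rather than the canonical txrx_ast_free_cb signature.
 */
#if 0	/* illustrative sketch only */
static void dp_example_ast_free_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
				   struct cdp_soc *cdp_soc,
				   void *cookie,
				   enum cdp_ast_free_status status)
{
	/* Invoked either when the FW responds to the AST delete, or
	 * immediately with CDP_TXRX_AST_DELETE_IN_PROGRESS when this
	 * callback displaces an older one, as done above.
	 */
	if (status == CDP_TXRX_AST_DELETE_IN_PROGRESS)
		return;	/* delete still outstanding; nothing to free yet */

	qdf_mem_free(cookie);	/* assumes the cookie was heap allocated */
}
#endif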
  749. /**
  750. * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
  751. * table if mac address and pdev_id matches
  752. *
753. * @soc_handle : data path soc handle
754. * @mac_addr : AST entry mac address
755. * @pdev_id : pdev id
756. * @callback : callback function to be called on ast delete response from FW
  757. * @cookie : argument to be passed to callback
  758. *
759. * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
760. * is sent
761. * QDF_STATUS_E_INVAL if ast entry not found
  762. */
  763. static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
  764. uint8_t *mac_addr,
  765. uint8_t pdev_id,
  766. txrx_ast_free_cb callback,
  767. void *cookie)
  768. {
  769. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  770. struct dp_ast_entry *ast_entry;
  771. txrx_ast_free_cb cb = NULL;
  772. void *arg = NULL;
  773. qdf_spin_lock_bh(&soc->ast_lock);
  774. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
  775. if (!ast_entry) {
  776. qdf_spin_unlock_bh(&soc->ast_lock);
  777. return -QDF_STATUS_E_INVAL;
  778. }
  779. if (ast_entry->callback) {
  780. cb = ast_entry->callback;
  781. arg = ast_entry->cookie;
  782. }
  783. ast_entry->callback = callback;
  784. ast_entry->cookie = cookie;
785. /*
786. * If delete_in_progress is set, an AST delete was already sent to the
787. * target and the host is waiting for a response; do not send the
788. * delete again.
789. */
  790. if (!ast_entry->delete_in_progress)
  791. dp_peer_del_ast(soc, ast_entry);
  792. qdf_spin_unlock_bh(&soc->ast_lock);
  793. if (cb) {
  794. cb(soc->ctrl_psoc,
  795. dp_soc_to_cdp_soc(soc),
  796. arg,
  797. CDP_TXRX_AST_DELETE_IN_PROGRESS);
  798. }
  799. return QDF_STATUS_SUCCESS;
  800. }
  801. /**
802. * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
  803. * @ring_num: ring num of the ring being queried
  804. * @grp_mask: the grp_mask array for the ring type in question.
  805. *
  806. * The grp_mask array is indexed by group number and the bit fields correspond
  807. * to ring numbers. We are finding which interrupt group a ring belongs to.
  808. *
  809. * Return: the index in the grp_mask array with the ring number.
  810. * -QDF_STATUS_E_NOENT if no entry is found
  811. */
  812. static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
  813. {
  814. int ext_group_num;
  815. int mask = 1 << ring_num;
  816. for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
  817. ext_group_num++) {
  818. if (mask & grp_mask[ext_group_num])
  819. return ext_group_num;
  820. }
  821. return -QDF_STATUS_E_NOENT;
  822. }
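/*
 * Worked example: for ring_num = 2, mask = 1 << 2 = 0x4; with hypothetical
 * grp_mask[] = { 0x3, 0x4, 0x8 }, group 0 owns rings 0-1 and group 1 owns
 * ring 2, so the function returns ext_group_num 1.
 */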
  823. static int dp_srng_calculate_msi_group(struct dp_soc *soc,
  824. enum hal_ring_type ring_type,
  825. int ring_num)
  826. {
  827. int *grp_mask;
  828. switch (ring_type) {
  829. case WBM2SW_RELEASE:
  830. /* dp_tx_comp_handler - soc->tx_comp_ring */
  831. if (ring_num < 3)
  832. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  833. /* dp_rx_wbm_err_process - soc->rx_rel_ring */
  834. else if (ring_num == 3) {
  835. /* sw treats this as a separate ring type */
  836. grp_mask = &soc->wlan_cfg_ctx->
  837. int_rx_wbm_rel_ring_mask[0];
  838. ring_num = 0;
  839. } else {
  840. qdf_assert(0);
  841. return -QDF_STATUS_E_NOENT;
  842. }
  843. break;
  844. case REO_EXCEPTION:
  845. /* dp_rx_err_process - &soc->reo_exception_ring */
  846. grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
  847. break;
  848. case REO_DST:
  849. /* dp_rx_process - soc->reo_dest_ring */
  850. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  851. break;
  852. case REO_STATUS:
  853. /* dp_reo_status_ring_handler - soc->reo_status_ring */
  854. grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
  855. break;
856. /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
  857. case RXDMA_MONITOR_STATUS:
  858. /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
  859. case RXDMA_MONITOR_DST:
  860. /* dp_mon_process */
  861. grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
  862. break;
  863. case RXDMA_DST:
  864. /* dp_rxdma_err_process */
  865. grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
  866. break;
  867. case RXDMA_BUF:
  868. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  869. break;
  870. case RXDMA_MONITOR_BUF:
  871. /* TODO: support low_thresh interrupt */
  872. return -QDF_STATUS_E_NOENT;
  873. break;
  874. case TCL_DATA:
  875. case TCL_CMD:
  876. case REO_CMD:
  877. case SW2WBM_RELEASE:
  878. case WBM_IDLE_LINK:
  879. /* normally empty SW_TO_HW rings */
  880. return -QDF_STATUS_E_NOENT;
  881. break;
  882. case TCL_STATUS:
  883. case REO_REINJECT:
  884. /* misc unused rings */
  885. return -QDF_STATUS_E_NOENT;
  886. break;
  887. case CE_SRC:
  888. case CE_DST:
  889. case CE_DST_STATUS:
  890. /* CE_rings - currently handled by hif */
  891. default:
  892. return -QDF_STATUS_E_NOENT;
  893. break;
  894. }
  895. return dp_srng_find_ring_in_mask(ring_num, grp_mask);
  896. }
  897. static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
  898. *ring_params, int ring_type, int ring_num)
  899. {
  900. int msi_group_number;
  901. int msi_data_count;
  902. int ret;
  903. uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
  904. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  905. &msi_data_count, &msi_data_start,
  906. &msi_irq_start);
  907. if (ret)
  908. return;
  909. msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
  910. ring_num);
  911. if (msi_group_number < 0) {
  912. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  913. FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
  914. ring_type, ring_num);
  915. ring_params->msi_addr = 0;
  916. ring_params->msi_data = 0;
  917. return;
  918. }
  919. if (msi_group_number > msi_data_count) {
  920. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  921. FL("2 msi_groups will share an msi; msi_group_num %d"),
  922. msi_group_number);
  923. QDF_ASSERT(0);
  924. }
  925. pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
  926. ring_params->msi_addr = addr_low;
  927. ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
  928. ring_params->msi_data = (msi_group_number % msi_data_count)
  929. + msi_data_start;
  930. ring_params->flags |= HAL_SRNG_MSI_INTR;
  931. }
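/*
 * Worked example (hypothetical platform values): with msi_data_start = 32
 * and msi_data_count = 3, a ring in msi_group_number 4 is assigned
 * msi_data = (4 % 3) + 32 = 33; groups 1 and 4 then share one MSI vector,
 * which is exactly the situation the warning above flags when
 * msi_group_number exceeds msi_data_count.
 */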
  932. /**
  933. * dp_print_ast_stats() - Dump AST table contents
  934. * @soc: Datapath soc handle
  935. *
936. * Return: void
  937. */
  938. #ifdef FEATURE_AST
  939. void dp_print_ast_stats(struct dp_soc *soc)
  940. {
  941. uint8_t i;
  942. uint8_t num_entries = 0;
  943. struct dp_vdev *vdev;
  944. struct dp_pdev *pdev;
  945. struct dp_peer *peer;
  946. struct dp_ast_entry *ase, *tmp_ase;
  947. char type[CDP_TXRX_AST_TYPE_MAX][10] = {
  948. "NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
  949. "DA", "HMWDS_SEC"};
  950. DP_PRINT_STATS("AST Stats:");
  951. DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
  952. DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
  953. DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
  954. DP_PRINT_STATS(" Entries MAP ERR = %d", soc->stats.ast.map_err);
  955. DP_PRINT_STATS("AST Table:");
  956. qdf_spin_lock_bh(&soc->ast_lock);
  957. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  958. pdev = soc->pdev_list[i];
  959. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  960. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  961. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  962. DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
  963. DP_PRINT_STATS("%6d mac_addr = %pM"
  964. " peer_mac_addr = %pM"
  965. " peer_id = %u"
  966. " type = %s"
  967. " next_hop = %d"
  968. " is_active = %d"
  969. " ast_idx = %d"
  970. " ast_hash = %d"
  971. " delete_in_progress = %d"
  972. " pdev_id = %d"
  973. " vdev_id = %d",
  974. ++num_entries,
  975. ase->mac_addr.raw,
  976. ase->peer->mac_addr.raw,
  977. ase->peer->peer_ids[0],
  978. type[ase->type],
  979. ase->next_hop,
  980. ase->is_active,
  981. ase->ast_idx,
  982. ase->ast_hash_value,
  983. ase->delete_in_progress,
  984. ase->pdev_id,
  985. vdev->vdev_id);
  986. }
  987. }
  988. }
  989. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  990. }
  991. qdf_spin_unlock_bh(&soc->ast_lock);
  992. }
  993. #else
  994. void dp_print_ast_stats(struct dp_soc *soc)
  995. {
  996. DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
  997. return;
  998. }
  999. #endif
  1000. /**
  1001. * dp_print_peer_table() - Dump all Peer stats
  1002. * @vdev: Datapath Vdev handle
  1003. *
1004. * Return: void
  1005. */
  1006. static void dp_print_peer_table(struct dp_vdev *vdev)
  1007. {
  1008. struct dp_peer *peer = NULL;
  1009. DP_PRINT_STATS("Dumping Peer Table Stats:");
  1010. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  1011. if (!peer) {
  1012. DP_PRINT_STATS("Invalid Peer");
  1013. return;
  1014. }
  1015. DP_PRINT_STATS(" peer_mac_addr = %pM"
  1016. " nawds_enabled = %d"
  1017. " bss_peer = %d"
  1018. " wds_enabled = %d"
  1019. " tx_cap_enabled = %d"
  1020. " rx_cap_enabled = %d"
  1021. " delete in progress = %d"
  1022. " peer id = %d",
  1023. peer->mac_addr.raw,
  1024. peer->nawds_enabled,
  1025. peer->bss_peer,
  1026. peer->wds_enabled,
  1027. peer->tx_cap_enabled,
  1028. peer->rx_cap_enabled,
  1029. peer->delete_in_progress,
  1030. peer->peer_ids[0]);
  1031. }
  1032. }
  1033. #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
  1034. /**
  1035. * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
  1036. * threshold values from the wlan_srng_cfg table for each ring type
  1037. * @soc: device handle
  1038. * @ring_params: per ring specific parameters
  1039. * @ring_type: Ring type
1040. * @ring_num: Ring number for a given ring type
* @num_entries: number of entries in the ring
  1041. *
  1042. * Fill the ring params with the interrupt threshold
  1043. * configuration parameters available in the per ring type wlan_srng_cfg
  1044. * table.
  1045. *
  1046. * Return: None
  1047. */
  1048. static void
  1049. dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
  1050. struct hal_srng_params *ring_params,
  1051. int ring_type, int ring_num,
  1052. int num_entries)
  1053. {
  1054. if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
  1055. ring_params->intr_timer_thres_us =
  1056. wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
  1057. ring_params->intr_batch_cntr_thres_entries =
  1058. wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
  1059. } else {
  1060. ring_params->intr_timer_thres_us =
  1061. soc->wlan_srng_cfg[ring_type].timer_threshold;
  1062. ring_params->intr_batch_cntr_thres_entries =
  1063. soc->wlan_srng_cfg[ring_type].batch_count_threshold;
  1064. }
  1065. ring_params->low_threshold =
  1066. soc->wlan_srng_cfg[ring_type].low_threshold;
  1067. if (ring_params->low_threshold)
  1068. ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
  1069. }
  1070. #else
  1071. static void
  1072. dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
  1073. struct hal_srng_params *ring_params,
  1074. int ring_type, int ring_num,
  1075. int num_entries)
  1076. {
  1077. if (ring_type == REO_DST) {
  1078. ring_params->intr_timer_thres_us =
  1079. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  1080. ring_params->intr_batch_cntr_thres_entries =
  1081. wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
  1082. } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
  1083. ring_params->intr_timer_thres_us =
  1084. wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
  1085. ring_params->intr_batch_cntr_thres_entries =
  1086. wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
  1087. } else {
  1088. ring_params->intr_timer_thres_us =
  1089. wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
  1090. ring_params->intr_batch_cntr_thres_entries =
  1091. wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
  1092. }
1093. /* Enable low threshold interrupts for rx buffer rings (regular and
1094. * monitor buffer rings).
1095. * TODO: See if this is required for any other ring
1096. */
  1097. if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
  1098. (ring_type == RXDMA_MONITOR_STATUS)) {
1099. /* TODO: Setting low threshold to 1/8th of ring size;
1100. * see if this needs to be configurable
1101. */
  1102. ring_params->low_threshold = num_entries >> 3;
  1103. ring_params->intr_timer_thres_us =
  1104. wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
  1105. ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
  1106. ring_params->intr_batch_cntr_thres_entries = 0;
  1107. }
  1108. }
  1109. #endif
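/*
 * Worked example for the fallback path above: a REO_DST ring takes its
 * timer/batch thresholds from the RX config entries, while an RX buffer
 * ring with num_entries = 1024 gets low_threshold = 1024 >> 3 = 128, i.e.
 * a low-threshold interrupt once the ring runs down to roughly 1/8th of
 * its size, with batch counting disabled (threshold 0).
 */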
1110. /**
1111. * dp_srng_setup() - Internal function to setup SRNG rings used by data path
1112. * @soc: datapath soc handle
1113. * @srng: srng handle
1114. * @ring_type: ring that needs to be configured
* @ring_num: ring number for the given ring type
1115. * @mac_id: mac number
1116. * @num_entries: Total number of entries for a given ring
* @cached: whether the ring memory should be allocated as cacheable
1117. *
1118. * Return: non-zero - failure/zero - success
1119. */
  1120. static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
  1121. int ring_type, int ring_num, int mac_id,
  1122. uint32_t num_entries, bool cached)
  1123. {
  1124. hal_soc_handle_t hal_soc = soc->hal_soc;
  1125. uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
  1126. /* TODO: See if we should get align size from hal */
  1127. uint32_t ring_base_align = 8;
  1128. struct hal_srng_params ring_params;
  1129. uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
  1130. /* TODO: Currently hal layer takes care of endianness related settings.
1131. * See if these settings need to be passed from DP layer
  1132. */
  1133. ring_params.flags = 0;
  1134. num_entries = (num_entries > max_entries) ? max_entries : num_entries;
  1135. srng->hal_srng = NULL;
  1136. srng->alloc_size = num_entries * entry_size;
  1137. srng->num_entries = num_entries;
  1138. if (!dp_is_soc_reinit(soc)) {
  1139. if (!cached) {
  1140. ring_params.ring_base_vaddr =
  1141. qdf_aligned_mem_alloc_consistent(
  1142. soc->osdev, &srng->alloc_size,
  1143. &srng->base_vaddr_unaligned,
  1144. &srng->base_paddr_unaligned,
  1145. &ring_params.ring_base_paddr,
  1146. ring_base_align);
  1147. } else {
  1148. ring_params.ring_base_vaddr = qdf_aligned_malloc(
  1149. &srng->alloc_size,
  1150. &srng->base_vaddr_unaligned,
  1151. &srng->base_paddr_unaligned,
  1152. &ring_params.ring_base_paddr,
  1153. ring_base_align);
  1154. }
  1155. if (!ring_params.ring_base_vaddr) {
  1156. dp_err("alloc failed - ring_type: %d, ring_num %d",
  1157. ring_type, ring_num);
  1158. return QDF_STATUS_E_NOMEM;
  1159. }
  1160. }
  1161. ring_params.ring_base_paddr = (qdf_dma_addr_t)qdf_align(
  1162. (unsigned long)(srng->base_paddr_unaligned),
  1163. ring_base_align);
  1164. ring_params.ring_base_vaddr = (void *)(
  1165. (unsigned long)(srng->base_vaddr_unaligned) +
  1166. ((unsigned long)(ring_params.ring_base_paddr) -
  1167. (unsigned long)(srng->base_paddr_unaligned)));
  1168. qdf_assert_always(ring_params.ring_base_vaddr);
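/*
 * Worked alignment example: if base_paddr_unaligned were 0x1003 with
 * ring_base_align = 8, qdf_align() produces ring_base_paddr = 0x1008 and
 * the virtual base is advanced by the same 0x5 delta, keeping the
 * vaddr/paddr pair in step.
 */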
  1169. ring_params.num_entries = num_entries;
  1170. dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
  1171. ring_type, ring_num,
  1172. (void *)ring_params.ring_base_vaddr,
  1173. (void *)ring_params.ring_base_paddr,
  1174. ring_params.num_entries);
  1175. if (soc->intr_mode == DP_INTR_MSI) {
  1176. dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
  1177. dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
  1178. ring_type, ring_num);
  1179. } else {
  1180. ring_params.msi_data = 0;
  1181. ring_params.msi_addr = 0;
  1182. dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
  1183. ring_type, ring_num);
  1184. }
  1185. dp_srng_configure_interrupt_thresholds(soc, &ring_params,
  1186. ring_type, ring_num,
  1187. num_entries);
  1188. if (cached) {
  1189. ring_params.flags |= HAL_SRNG_CACHED_DESC;
  1190. srng->cached = 1;
  1191. }
  1192. srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
  1193. mac_id, &ring_params);
1194. if (!srng->hal_srng) {
1195. if (cached) {
1196. qdf_mem_free(srng->base_vaddr_unaligned);
1197. } else {
1198. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1199. srng->alloc_size,
1200. srng->base_vaddr_unaligned,
1201. srng->base_paddr_unaligned, 0);
1202. }
/* report the failure instead of falling through to the success return,
 * matching the "non-zero - failure" contract documented above
 */
return QDF_STATUS_E_FAILURE;
1203. }
1204. return 0;
  1205. }
  1206. /*
  1207. * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
  1208. * @soc: DP SOC handle
  1209. * @srng: source ring structure
  1210. * @ring_type: type of ring
  1211. * @ring_num: ring number
  1212. *
  1213. * Return: None
  1214. */
  1215. static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
  1216. int ring_type, int ring_num)
  1217. {
  1218. if (!srng->hal_srng) {
  1219. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1220. FL("Ring type: %d, num:%d not setup"),
  1221. ring_type, ring_num);
  1222. return;
  1223. }
  1224. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  1225. srng->hal_srng = NULL;
  1226. }
  1227. /**
1228. * dp_srng_cleanup() - Internal function to cleanup SRNG rings used by data path
  1229. * Any buffers allocated and attached to ring entries are expected to be freed
  1230. * before calling this function.
  1231. */
  1232. static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
  1233. int ring_type, int ring_num)
  1234. {
  1235. if (!dp_is_soc_reinit(soc)) {
  1236. if (!srng->hal_srng && (srng->alloc_size == 0)) {
  1237. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1238. FL("Ring type: %d, num:%d not setup"),
  1239. ring_type, ring_num);
  1240. return;
  1241. }
  1242. if (srng->hal_srng) {
  1243. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  1244. srng->hal_srng = NULL;
  1245. }
  1246. }
  1247. if (srng->alloc_size && srng->base_vaddr_unaligned) {
  1248. if (!srng->cached) {
  1249. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1250. srng->alloc_size,
  1251. srng->base_vaddr_unaligned,
  1252. srng->base_paddr_unaligned, 0);
  1253. } else {
  1254. qdf_mem_free(srng->base_vaddr_unaligned);
  1255. }
  1256. srng->alloc_size = 0;
  1257. srng->base_vaddr_unaligned = NULL;
  1258. }
  1259. srng->hal_srng = NULL;
  1260. }
  1261. /* TODO: Need this interface from HIF */
  1262. void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
  1263. #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
  1264. int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
  1265. hal_ring_handle_t hal_ring_hdl)
  1266. {
  1267. hal_soc_handle_t hal_soc = dp_soc->hal_soc;
  1268. uint32_t hp, tp;
  1269. uint8_t ring_id;
  1270. hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
  1271. ring_id = hal_srng_ring_id_get(hal_ring_hdl);
  1272. hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
  1273. ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
  1274. return hal_srng_access_start(hal_soc, hal_ring_hdl);
  1275. }
  1276. void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
  1277. hal_ring_handle_t hal_ring_hdl)
  1278. {
  1279. hal_soc_handle_t hal_soc = dp_soc->hal_soc;
  1280. uint32_t hp, tp;
  1281. uint8_t ring_id;
  1282. hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
  1283. ring_id = hal_srng_ring_id_get(hal_ring_hdl);
  1284. hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
  1285. ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
  1286. return hal_srng_access_end(hal_soc, hal_ring_hdl);
  1287. }
  1288. #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
  1289. /*
  1290. * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1291. * @dp_ctx: DP interrupt context handle
1292. * @dp_budget: Number of frames/descriptors that can be processed in one shot
1293. *
1294. * Return: work done, i.e. the portion of dp_budget consumed
  1295. */
  1296. static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
  1297. {
  1298. struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
  1299. struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
  1300. struct dp_soc *soc = int_ctx->soc;
  1301. int ring = 0;
  1302. uint32_t work_done = 0;
  1303. int budget = dp_budget;
  1304. uint8_t tx_mask = int_ctx->tx_ring_mask;
  1305. uint8_t rx_mask = int_ctx->rx_ring_mask;
  1306. uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
  1307. uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
  1308. uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
  1309. uint32_t remaining_quota = dp_budget;
  1310. struct dp_pdev *pdev = NULL;
  1311. int mac_id;
  1312. dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
  1313. tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
  1314. reo_status_mask,
  1315. int_ctx->rx_mon_ring_mask,
  1316. int_ctx->host2rxdma_ring_mask,
  1317. int_ctx->rxdma2host_ring_mask);
  1318. /* Process Tx completion interrupts first to return back buffers */
  1319. while (tx_mask) {
  1320. if (tx_mask & 0x1) {
  1321. work_done = dp_tx_comp_handler(int_ctx,
  1322. soc,
  1323. soc->tx_comp_ring[ring].hal_srng,
  1324. ring, remaining_quota);
  1325. if (work_done) {
  1326. intr_stats->num_tx_ring_masks[ring]++;
  1327. dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
  1328. tx_mask, ring, budget,
  1329. work_done);
  1330. }
  1331. budget -= work_done;
  1332. if (budget <= 0)
  1333. goto budget_done;
  1334. remaining_quota = budget;
  1335. }
  1336. tx_mask = tx_mask >> 1;
  1337. ring++;
  1338. }
  1339. /* Process REO Exception ring interrupt */
  1340. if (rx_err_mask) {
  1341. work_done = dp_rx_err_process(int_ctx, soc,
  1342. soc->reo_exception_ring.hal_srng,
  1343. remaining_quota);
  1344. if (work_done) {
  1345. intr_stats->num_rx_err_ring_masks++;
  1346. dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
  1347. work_done, budget);
  1348. }
  1349. budget -= work_done;
  1350. if (budget <= 0) {
  1351. goto budget_done;
  1352. }
  1353. remaining_quota = budget;
  1354. }
  1355. /* Process Rx WBM release ring interrupt */
  1356. if (rx_wbm_rel_mask) {
  1357. work_done = dp_rx_wbm_err_process(int_ctx, soc,
  1358. soc->rx_rel_ring.hal_srng,
  1359. remaining_quota);
  1360. if (work_done) {
  1361. intr_stats->num_rx_wbm_rel_ring_masks++;
  1362. dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
  1363. work_done, budget);
  1364. }
  1365. budget -= work_done;
  1366. if (budget <= 0) {
  1367. goto budget_done;
  1368. }
  1369. remaining_quota = budget;
  1370. }
  1371. /* Process Rx interrupts */
  1372. if (rx_mask) {
  1373. for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
  1374. if (!(rx_mask & (1 << ring)))
  1375. continue;
  1376. work_done = dp_rx_process(int_ctx,
  1377. soc->reo_dest_ring[ring].hal_srng,
  1378. ring,
  1379. remaining_quota);
  1380. if (work_done) {
  1381. intr_stats->num_rx_ring_masks[ring]++;
  1382. dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
  1383. rx_mask, ring,
  1384. work_done, budget);
  1385. budget -= work_done;
  1386. if (budget <= 0)
  1387. goto budget_done;
  1388. remaining_quota = budget;
  1389. }
  1390. }
  1391. }
  1392. if (reo_status_mask) {
  1393. if (dp_reo_status_ring_handler(int_ctx, soc))
  1394. int_ctx->intr_stats.num_reo_status_ring_masks++;
  1395. }
  1396. /* Process LMAC interrupts */
  1397. for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
  1398. pdev = soc->pdev_list[ring];
  1399. if (!pdev)
  1400. continue;
  1401. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  1402. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  1403. pdev->pdev_id);
  1404. if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
  1405. work_done = dp_mon_process(soc, mac_for_pdev,
  1406. remaining_quota);
  1407. if (work_done)
  1408. intr_stats->num_rx_mon_ring_masks++;
  1409. budget -= work_done;
  1410. if (budget <= 0)
  1411. goto budget_done;
  1412. remaining_quota = budget;
  1413. }
  1414. if (int_ctx->rxdma2host_ring_mask &
  1415. (1 << mac_for_pdev)) {
  1416. work_done = dp_rxdma_err_process(int_ctx, soc,
  1417. mac_for_pdev,
  1418. remaining_quota);
  1419. if (work_done)
  1420. intr_stats->num_rxdma2host_ring_masks++;
  1421. budget -= work_done;
  1422. if (budget <= 0)
  1423. goto budget_done;
  1424. remaining_quota = budget;
  1425. }
  1426. if (int_ctx->host2rxdma_ring_mask &
  1427. (1 << mac_for_pdev)) {
  1428. union dp_rx_desc_list_elem_t *desc_list = NULL;
  1429. union dp_rx_desc_list_elem_t *tail = NULL;
  1430. struct dp_srng *rx_refill_buf_ring =
  1431. &pdev->rx_refill_buf_ring;
  1432. intr_stats->num_host2rxdma_ring_masks++;
  1433. DP_STATS_INC(pdev, replenish.low_thresh_intrs,
  1434. 1);
  1435. dp_rx_buffers_replenish(soc, mac_for_pdev,
  1436. rx_refill_buf_ring,
  1437. &soc->rx_desc_buf[mac_for_pdev],
  1438. 0, &desc_list, &tail);
  1439. }
  1440. }
  1441. }
  1442. qdf_lro_flush(int_ctx->lro_ctx);
  1443. intr_stats->num_masks++;
  1444. budget_done:
  1445. return dp_budget - budget;
  1446. }
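/*
 * Budget accounting example: with dp_budget = 64, if TX completion consumes
 * 20 and RX processing consumes the remaining 44, budget hits 0 and control
 * jumps to budget_done, returning 64 - 0 = 64 as the work done; the caller
 * can compare that against the budget it granted to decide whether to poll
 * again or re-enable interrupts.
 */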
1447. /*
1448. * dp_interrupt_timer() - timer handler to poll for interrupts
1449. * @arg: SoC Handle
1450. *
1451. * Return: None
1452. *
1453. */
  1454. static void dp_interrupt_timer(void *arg)
  1455. {
  1456. struct dp_soc *soc = (struct dp_soc *) arg;
  1457. int i;
  1458. if (qdf_atomic_read(&soc->cmn_init_done)) {
  1459. for (i = 0;
  1460. i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
  1461. dp_service_srngs(&soc->intr_ctx[i], 0xffff);
  1462. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  1463. }
  1464. }
  1465. /*
  1466. * dp_soc_attach_poll() - Register handlers for DP interrupts
  1467. * @txrx_soc: DP SOC handle
  1468. *
1469. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1470. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
  1471. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  1472. *
  1473. * Return: 0 for success, nonzero for failure.
  1474. */
  1475. static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
  1476. {
  1477. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1478. int i;
  1479. soc->intr_mode = DP_INTR_POLL;
  1480. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1481. soc->intr_ctx[i].dp_intr_id = i;
  1482. soc->intr_ctx[i].tx_ring_mask =
  1483. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1484. soc->intr_ctx[i].rx_ring_mask =
  1485. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1486. soc->intr_ctx[i].rx_mon_ring_mask =
  1487. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
  1488. soc->intr_ctx[i].rx_err_ring_mask =
  1489. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1490. soc->intr_ctx[i].rx_wbm_rel_ring_mask =
  1491. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1492. soc->intr_ctx[i].reo_status_ring_mask =
  1493. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1494. soc->intr_ctx[i].rxdma2host_ring_mask =
  1495. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1496. soc->intr_ctx[i].soc = soc;
  1497. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1498. }
  1499. qdf_timer_init(soc->osdev, &soc->int_timer,
  1500. dp_interrupt_timer, (void *)soc,
  1501. QDF_TIMER_TYPE_WAKE_APPS);
  1502. return QDF_STATUS_SUCCESS;
  1503. }
  1504. /**
  1505. * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
1506. * @soc: DP soc handle
  1507. *
  1508. * Set the appropriate interrupt mode flag in the soc
  1509. */
  1510. static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
  1511. {
  1512. uint32_t msi_base_data, msi_vector_start;
  1513. int msi_vector_count, ret;
  1514. soc->intr_mode = DP_INTR_LEGACY;
  1515. if (!(soc->wlan_cfg_ctx->napi_enabled) ||
  1516. (soc->cdp_soc.ol_ops->get_con_mode &&
  1517. soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
  1518. soc->intr_mode = DP_INTR_POLL;
  1519. } else {
  1520. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  1521. &msi_vector_count,
  1522. &msi_base_data,
  1523. &msi_vector_start);
  1524. if (ret)
  1525. return;
  1526. soc->intr_mode = DP_INTR_MSI;
  1527. }
  1528. }
  1529. static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
  1530. #if defined(DP_INTR_POLL_BOTH)
  1531. /*
  1532. * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
  1533. * @txrx_soc: DP SOC handle
  1534. *
  1535. * Call the appropriate attach function based on the mode of operation.
  1536. * This is a WAR for enabling monitor mode.
  1537. *
  1538. * Return: 0 for success. nonzero for failure.
  1539. */
  1540. static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
  1541. {
  1542. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1543. if (!(soc->wlan_cfg_ctx->napi_enabled) ||
  1544. (soc->cdp_soc.ol_ops->get_con_mode &&
  1545. soc->cdp_soc.ol_ops->get_con_mode() ==
  1546. QDF_GLOBAL_MONITOR_MODE)) {
  1547. dp_info("Poll mode");
  1548. return dp_soc_attach_poll(txrx_soc);
  1549. } else {
  1550. dp_info("Interrupt mode");
  1551. return dp_soc_interrupt_attach(txrx_soc);
  1552. }
  1553. }
  1554. #else
  1555. #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
  1556. static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
  1557. {
  1558. return dp_soc_attach_poll(txrx_soc);
  1559. }
  1560. #else
  1561. static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
  1562. {
  1563. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1564. if (hif_is_polled_mode_enabled(soc->hif_handle))
  1565. return dp_soc_attach_poll(txrx_soc);
  1566. else
  1567. return dp_soc_interrupt_attach(txrx_soc);
  1568. }
  1569. #endif
  1570. #endif
  1571. static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
  1572. int intr_ctx_num, int *irq_id_map, int *num_irq_r)
  1573. {
  1574. int j;
  1575. int num_irq = 0;
  1576. int tx_mask =
  1577. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1578. int rx_mask =
  1579. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1580. int rx_mon_mask =
  1581. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  1582. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1583. soc->wlan_cfg_ctx, intr_ctx_num);
  1584. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1585. soc->wlan_cfg_ctx, intr_ctx_num);
  1586. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1587. soc->wlan_cfg_ctx, intr_ctx_num);
  1588. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1589. soc->wlan_cfg_ctx, intr_ctx_num);
  1590. int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
  1591. soc->wlan_cfg_ctx, intr_ctx_num);
  1592. int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
  1593. soc->wlan_cfg_ctx, intr_ctx_num);
  1594. soc->intr_mode = DP_INTR_LEGACY;
  1595. for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
  1596. if (tx_mask & (1 << j)) {
  1597. irq_id_map[num_irq++] =
  1598. (wbm2host_tx_completions_ring1 - j);
  1599. }
  1600. if (rx_mask & (1 << j)) {
  1601. irq_id_map[num_irq++] =
  1602. (reo2host_destination_ring1 - j);
  1603. }
  1604. if (rxdma2host_ring_mask & (1 << j)) {
  1605. irq_id_map[num_irq++] =
  1606. rxdma2host_destination_ring_mac1 -
  1607. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1608. }
  1609. if (host2rxdma_ring_mask & (1 << j)) {
  1610. irq_id_map[num_irq++] =
  1611. host2rxdma_host_buf_ring_mac1 -
  1612. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1613. }
  1614. if (host2rxdma_mon_ring_mask & (1 << j)) {
  1615. irq_id_map[num_irq++] =
  1616. host2rxdma_monitor_ring1 -
  1617. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1618. }
  1619. if (rx_mon_mask & (1 << j)) {
  1620. irq_id_map[num_irq++] =
  1621. ppdu_end_interrupts_mac1 -
  1622. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1623. irq_id_map[num_irq++] =
  1624. rxdma2host_monitor_status_ring_mac1 -
  1625. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1626. }
  1627. if (rx_wbm_rel_ring_mask & (1 << j))
  1628. irq_id_map[num_irq++] = wbm2host_rx_release;
  1629. if (rx_err_ring_mask & (1 << j))
  1630. irq_id_map[num_irq++] = reo2host_exception;
  1631. if (reo_status_ring_mask & (1 << j))
  1632. irq_id_map[num_irq++] = reo2host_status;
  1633. }
  1634. *num_irq_r = num_irq;
  1635. }
  1636. static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
  1637. int intr_ctx_num, int *irq_id_map, int *num_irq_r,
  1638. int msi_vector_count, int msi_vector_start)
  1639. {
  1640. int tx_mask = wlan_cfg_get_tx_ring_mask(
  1641. soc->wlan_cfg_ctx, intr_ctx_num);
  1642. int rx_mask = wlan_cfg_get_rx_ring_mask(
  1643. soc->wlan_cfg_ctx, intr_ctx_num);
  1644. int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
  1645. soc->wlan_cfg_ctx, intr_ctx_num);
  1646. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  1647. soc->wlan_cfg_ctx, intr_ctx_num);
  1648. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  1649. soc->wlan_cfg_ctx, intr_ctx_num);
  1650. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  1651. soc->wlan_cfg_ctx, intr_ctx_num);
  1652. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  1653. soc->wlan_cfg_ctx, intr_ctx_num);
  1654. unsigned int vector =
  1655. (intr_ctx_num % msi_vector_count) + msi_vector_start;
  1656. int num_irq = 0;
  1657. soc->intr_mode = DP_INTR_MSI;
  1658. if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
  1659. rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
  1660. irq_id_map[num_irq++] =
  1661. pld_get_msi_irq(soc->osdev->dev, vector);
  1662. *num_irq_r = num_irq;
  1663. }
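/*
 * Worked example (hypothetical counts): with msi_vector_start = 1 and
 * msi_vector_count = 3, interrupt contexts 0..6 map to MSI vectors
 * 1, 2, 3, 1, 2, 3, 1 respectively, so contexts 0, 3 and 6 share a single
 * IRQ line.
 */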
  1664. static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
  1665. int *irq_id_map, int *num_irq)
  1666. {
  1667. int msi_vector_count, ret;
  1668. uint32_t msi_base_data, msi_vector_start;
  1669. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  1670. &msi_vector_count,
  1671. &msi_base_data,
  1672. &msi_vector_start);
  1673. if (ret)
  1674. return dp_soc_interrupt_map_calculate_integrated(soc,
  1675. intr_ctx_num, irq_id_map, num_irq);
  1676. else
  1677. dp_soc_interrupt_map_calculate_msi(soc,
  1678. intr_ctx_num, irq_id_map, num_irq,
  1679. msi_vector_count, msi_vector_start);
  1680. }
  1681. /*
  1682. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  1683. * @txrx_soc: DP SOC handle
  1684. *
1685. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1686. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
  1687. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  1688. *
  1689. * Return: 0 for success. nonzero for failure.
  1690. */
  1691. static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
  1692. {
  1693. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1694. int i = 0;
  1695. int num_irq = 0;
  1696. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1697. int ret = 0;
  1698. /* Map of IRQ ids registered with one interrupt context */
  1699. int irq_id_map[HIF_MAX_GRP_IRQ];
  1700. int tx_mask =
  1701. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  1702. int rx_mask =
  1703. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  1704. int rx_mon_mask =
  1705. dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
  1706. int rx_err_ring_mask =
  1707. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  1708. int rx_wbm_rel_ring_mask =
  1709. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  1710. int reo_status_ring_mask =
  1711. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  1712. int rxdma2host_ring_mask =
  1713. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  1714. int host2rxdma_ring_mask =
  1715. wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
  1716. int host2rxdma_mon_ring_mask =
  1717. wlan_cfg_get_host2rxdma_mon_ring_mask(
  1718. soc->wlan_cfg_ctx, i);
  1719. soc->intr_ctx[i].dp_intr_id = i;
  1720. soc->intr_ctx[i].tx_ring_mask = tx_mask;
  1721. soc->intr_ctx[i].rx_ring_mask = rx_mask;
  1722. soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
  1723. soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
  1724. soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
  1725. soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
  1726. soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
  1727. soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
  1728. soc->intr_ctx[i].host2rxdma_mon_ring_mask =
  1729. host2rxdma_mon_ring_mask;
  1730. soc->intr_ctx[i].soc = soc;
  1731. num_irq = 0;
  1732. dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
  1733. &num_irq);
  1734. ret = hif_register_ext_group(soc->hif_handle,
  1735. num_irq, irq_id_map, dp_service_srngs,
  1736. &soc->intr_ctx[i], "dp_intr",
  1737. HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
  1738. if (ret) {
  1739. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1740. FL("failed, ret = %d"), ret);
  1741. return QDF_STATUS_E_FAILURE;
  1742. }
  1743. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  1744. }
  1745. hif_configure_ext_group_interrupts(soc->hif_handle);
  1746. return QDF_STATUS_SUCCESS;
  1747. }
  1748. /*
  1749. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  1750. * @txrx_soc: DP SOC handle
  1751. *
  1752. * Return: none
  1753. */
  1754. static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
  1755. {
  1756. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1757. int i;
  1758. if (soc->intr_mode == DP_INTR_POLL) {
  1759. qdf_timer_stop(&soc->int_timer);
  1760. qdf_timer_free(&soc->int_timer);
  1761. } else {
  1762. hif_deregister_exec_group(soc->hif_handle, "dp_intr");
  1763. }
  1764. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  1765. soc->intr_ctx[i].tx_ring_mask = 0;
  1766. soc->intr_ctx[i].rx_ring_mask = 0;
  1767. soc->intr_ctx[i].rx_mon_ring_mask = 0;
  1768. soc->intr_ctx[i].rx_err_ring_mask = 0;
  1769. soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
  1770. soc->intr_ctx[i].reo_status_ring_mask = 0;
  1771. soc->intr_ctx[i].rxdma2host_ring_mask = 0;
  1772. soc->intr_ctx[i].host2rxdma_ring_mask = 0;
  1773. soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
  1774. qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
  1775. }
  1776. }
  1777. #define AVG_MAX_MPDUS_PER_TID 128
  1778. #define AVG_TIDS_PER_CLIENT 2
  1779. #define AVG_FLOWS_PER_TID 2
  1780. #define AVG_MSDUS_PER_FLOW 128
  1781. #define AVG_MSDUS_PER_MPDU 4
  1782. /*
  1783. * Allocate and setup link descriptor pool that will be used by HW for
  1784. * various link and queue descriptors and managed by WBM
  1785. */
  1786. static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
  1787. {
  1788. int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
  1789. int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
  1790. uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
  1791. uint32_t num_mpdus_per_link_desc =
  1792. hal_num_mpdus_per_link_desc(soc->hal_soc);
  1793. uint32_t num_msdus_per_link_desc =
  1794. hal_num_msdus_per_link_desc(soc->hal_soc);
  1795. uint32_t num_mpdu_links_per_queue_desc =
  1796. hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
  1797. uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
  1798. uint32_t total_link_descs, total_mem_size;
  1799. uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
  1800. uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
  1801. uint32_t num_link_desc_banks;
  1802. uint32_t last_bank_size = 0;
  1803. uint32_t entry_size, num_entries;
  1804. int i;
  1805. uint32_t desc_id = 0;
  1806. qdf_dma_addr_t *baseaddr = NULL;
1807. /* Only Tx queue descriptors are allocated from the common link descriptor
1808. * pool; Rx queue descriptors (REO queue extension descriptors) are not
1809. * included here because they are expected to be allocated contiguously
1810. * with the REO queue descriptors
1811. */
  1812. num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1813. AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
  1814. num_mpdu_queue_descs = num_mpdu_link_descs /
  1815. num_mpdu_links_per_queue_desc;
  1816. num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1817. AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
  1818. num_msdus_per_link_desc;
  1819. num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1820. AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
  1821. num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
  1822. num_tx_msdu_link_descs + num_rx_msdu_link_descs;
  1823. /* Round up to power of 2 */
  1824. total_link_descs = 1;
  1825. while (total_link_descs < num_entries)
  1826. total_link_descs <<= 1;
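/* Worked example: if num_entries comes to 12288, the loop above rounds
 * total_link_descs up to the next power of two, 16384.
 */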
  1827. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1828. FL("total_link_descs: %u, link_desc_size: %d"),
  1829. total_link_descs, link_desc_size);
  1830. total_mem_size = total_link_descs * link_desc_size;
  1831. total_mem_size += link_desc_align;
  1832. if (total_mem_size <= max_alloc_size) {
  1833. num_link_desc_banks = 0;
  1834. last_bank_size = total_mem_size;
  1835. } else {
  1836. num_link_desc_banks = (total_mem_size) /
  1837. (max_alloc_size - link_desc_align);
  1838. last_bank_size = total_mem_size %
  1839. (max_alloc_size - link_desc_align);
  1840. }
  1841. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1842. FL("total_mem_size: %d, num_link_desc_banks: %u"),
  1843. total_mem_size, num_link_desc_banks);
  1844. for (i = 0; i < num_link_desc_banks; i++) {
  1845. if (!dp_is_soc_reinit(soc)) {
  1846. baseaddr = &soc->link_desc_banks[i].
  1847. base_paddr_unaligned;
  1848. soc->link_desc_banks[i].base_vaddr_unaligned =
  1849. qdf_mem_alloc_consistent(soc->osdev,
  1850. soc->osdev->dev,
  1851. max_alloc_size,
  1852. baseaddr);
  1853. }
  1854. soc->link_desc_banks[i].size = max_alloc_size;
  1855. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
  1856. soc->link_desc_banks[i].base_vaddr_unaligned) +
  1857. ((unsigned long)(
  1858. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1859. link_desc_align));
  1860. soc->link_desc_banks[i].base_paddr = (unsigned long)(
  1861. soc->link_desc_banks[i].base_paddr_unaligned) +
  1862. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1863. (unsigned long)(
  1864. soc->link_desc_banks[i].base_vaddr_unaligned));
  1865. if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
  1866. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1867. FL("Link descriptor memory alloc failed"));
  1868. goto fail;
  1869. }
  1870. if (!dp_is_soc_reinit(soc)) {
  1871. qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
  1872. soc->link_desc_banks[i].size,
  1873. "link_desc_bank");
  1874. }
  1875. qdf_minidump_log((soc->link_desc_banks[i].base_vaddr),
  1876. soc->link_desc_banks[i].size,
  1877. "link_desc_bank");
  1878. }
  1879. if (last_bank_size) {
1880. /* Allocate the last bank in case the total memory required is not an
1881. * exact multiple of max_alloc_size
  1882. */
  1883. if (!dp_is_soc_reinit(soc)) {
  1884. baseaddr = &soc->link_desc_banks[i].
  1885. base_paddr_unaligned;
  1886. soc->link_desc_banks[i].base_vaddr_unaligned =
  1887. qdf_mem_alloc_consistent(soc->osdev,
  1888. soc->osdev->dev,
  1889. last_bank_size,
  1890. baseaddr);
  1891. }
  1892. soc->link_desc_banks[i].size = last_bank_size;
  1893. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
  1894. (soc->link_desc_banks[i].base_vaddr_unaligned) +
  1895. ((unsigned long)(
  1896. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1897. link_desc_align));
  1898. soc->link_desc_banks[i].base_paddr =
  1899. (unsigned long)(
  1900. soc->link_desc_banks[i].base_paddr_unaligned) +
  1901. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1902. (unsigned long)(
  1903. soc->link_desc_banks[i].base_vaddr_unaligned));
  1904. if (!dp_is_soc_reinit(soc)) {
  1905. qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
  1906. soc->link_desc_banks[i].size,
  1907. "link_desc_bank");
  1908. }
  1909. qdf_minidump_log((soc->link_desc_banks[i].base_vaddr),
  1910. soc->link_desc_banks[i].size,
  1911. "link_desc_bank");
  1912. }
  1913. /* Allocate and setup link descriptor idle list for HW internal use */
  1914. entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
  1915. total_mem_size = entry_size * total_link_descs;
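/*
 * If the whole idle list fits in one allocation, a regular WBM_IDLE_LINK
 * SRNG is set up and populated directly below; otherwise the idle list is
 * built out of multiple scatter buffers and handed to HW via
 * hal_setup_link_idle_list() in the else branch.
 */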
  1916. if (total_mem_size <= max_alloc_size) {
  1917. void *desc;
  1918. if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
  1919. WBM_IDLE_LINK, 0, 0, total_link_descs, 0)) {
  1920. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1921. FL("Link desc idle ring setup failed"));
  1922. goto fail;
  1923. }
  1924. qdf_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
  1925. soc->wbm_idle_link_ring.alloc_size,
  1926. "wbm_idle_link_ring");
  1927. hal_srng_access_start_unlocked(soc->hal_soc,
  1928. soc->wbm_idle_link_ring.hal_srng);
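/*
 * Walk each allocated bank and push one idle list entry per link
 * descriptor. The cookie from LINK_DESC_COOKIE() packs the running desc_id
 * together with the bank index so a returned descriptor can be traced back
 * to its bank later.
 */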
  1929. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1930. soc->link_desc_banks[i].base_paddr; i++) {
  1931. uint32_t num_entries = (soc->link_desc_banks[i].size -
  1932. ((unsigned long)(
  1933. soc->link_desc_banks[i].base_vaddr) -
  1934. (unsigned long)(
  1935. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1936. / link_desc_size;
  1937. unsigned long paddr = (unsigned long)(
  1938. soc->link_desc_banks[i].base_paddr);
  1939. while (num_entries && (desc = hal_srng_src_get_next(
  1940. soc->hal_soc,
  1941. soc->wbm_idle_link_ring.hal_srng))) {
  1942. hal_set_link_desc_addr(desc,
  1943. LINK_DESC_COOKIE(desc_id, i), paddr);
  1944. num_entries--;
  1945. desc_id++;
  1946. paddr += link_desc_size;
  1947. }
  1948. }
  1949. hal_srng_access_end_unlocked(soc->hal_soc,
  1950. soc->wbm_idle_link_ring.hal_srng);
  1951. } else {
  1952. uint32_t num_scatter_bufs;
  1953. uint32_t num_entries_per_buf;
  1954. uint32_t rem_entries;
  1955. uint8_t *scatter_buf_ptr;
  1956. uint16_t scatter_buf_num;
  1957. uint32_t buf_size = 0;
  1958. soc->wbm_idle_scatter_buf_size =
  1959. hal_idle_list_scatter_buf_size(soc->hal_soc);
  1960. num_entries_per_buf = hal_idle_scatter_buf_num_entries(
  1961. soc->hal_soc, soc->wbm_idle_scatter_buf_size);
  1962. num_scatter_bufs = hal_idle_list_num_scatter_bufs(
  1963. soc->hal_soc, total_mem_size,
  1964. soc->wbm_idle_scatter_buf_size);
  1965. if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
  1966. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1967. FL("scatter bufs size out of bounds"));
  1968. goto fail;
  1969. }
  1970. for (i = 0; i < num_scatter_bufs; i++) {
  1971. baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
  1972. if (!dp_is_soc_reinit(soc)) {
  1973. buf_size = soc->wbm_idle_scatter_buf_size;
  1974. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  1975. qdf_mem_alloc_consistent(soc->osdev,
  1976. soc->osdev->
  1977. dev,
  1978. buf_size,
  1979. baseaddr);
  1980. }
  1981. if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1982. QDF_TRACE(QDF_MODULE_ID_DP,
  1983. QDF_TRACE_LEVEL_ERROR,
  1984. FL("Scatter lst memory alloc fail"));
  1985. goto fail;
  1986. }
  1987. }
  1988. /* Populate idle list scatter buffers with link descriptor
  1989. * pointers
  1990. */
  1991. scatter_buf_num = 0;
  1992. scatter_buf_ptr = (uint8_t *)(
  1993. soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
  1994. rem_entries = num_entries_per_buf;
  1995. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1996. soc->link_desc_banks[i].base_paddr; i++) {
  1997. uint32_t num_link_descs =
  1998. (soc->link_desc_banks[i].size -
  1999. ((unsigned long)(
  2000. soc->link_desc_banks[i].base_vaddr) -
  2001. (unsigned long)(
  2002. soc->link_desc_banks[i].base_vaddr_unaligned)))
  2003. / link_desc_size;
  2004. unsigned long paddr = (unsigned long)(
  2005. soc->link_desc_banks[i].base_paddr);
  2006. while (num_link_descs) {
  2007. hal_set_link_desc_addr((void *)scatter_buf_ptr,
  2008. LINK_DESC_COOKIE(desc_id, i), paddr);
  2009. num_link_descs--;
  2010. desc_id++;
  2011. paddr += link_desc_size;
  2012. rem_entries--;
  2013. if (rem_entries) {
  2014. scatter_buf_ptr += entry_size;
  2015. } else {
  2016. rem_entries = num_entries_per_buf;
  2017. scatter_buf_num++;
  2018. if (scatter_buf_num >= num_scatter_bufs)
  2019. break;
  2020. scatter_buf_ptr = (uint8_t *)(
  2021. soc->wbm_idle_scatter_buf_base_vaddr[
  2022. scatter_buf_num]);
  2023. }
  2024. }
  2025. }
  2026. /* Setup link descriptor idle list in HW */
  2027. hal_setup_link_idle_list(soc->hal_soc,
  2028. soc->wbm_idle_scatter_buf_base_paddr,
  2029. soc->wbm_idle_scatter_buf_base_vaddr,
  2030. num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
  2031. (uint32_t)(scatter_buf_ptr -
  2032. (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
  2033. scatter_buf_num-1])), total_link_descs);
  2034. }
  2035. return 0;
  2036. fail:
  2037. if (soc->wbm_idle_link_ring.hal_srng) {
  2038. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  2039. WBM_IDLE_LINK, 0);
  2040. }
  2041. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  2042. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  2043. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  2044. soc->wbm_idle_scatter_buf_size,
  2045. soc->wbm_idle_scatter_buf_base_vaddr[i],
  2046. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  2047. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  2048. }
  2049. }
  2050. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  2051. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  2052. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  2053. soc->link_desc_banks[i].size,
  2054. soc->link_desc_banks[i].base_vaddr_unaligned,
  2055. soc->link_desc_banks[i].base_paddr_unaligned,
  2056. 0);
  2057. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  2058. }
  2059. }
  2060. return QDF_STATUS_E_FAILURE;
  2061. }
  2062. /*
2063. * Free the link descriptor pool that was set up for HW use
  2064. */
  2065. static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
  2066. {
  2067. int i;
  2068. if (soc->wbm_idle_link_ring.hal_srng) {
  2069. qdf_minidump_remove(
  2070. soc->wbm_idle_link_ring.base_vaddr_unaligned);
  2071. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  2072. WBM_IDLE_LINK, 0);
  2073. }
  2074. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  2075. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  2076. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  2077. soc->wbm_idle_scatter_buf_size,
  2078. soc->wbm_idle_scatter_buf_base_vaddr[i],
  2079. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  2080. soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
  2081. }
  2082. }
  2083. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  2084. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  2085. qdf_minidump_remove(soc->link_desc_banks[i].base_vaddr);
  2086. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  2087. soc->link_desc_banks[i].size,
  2088. soc->link_desc_banks[i].base_vaddr_unaligned,
  2089. soc->link_desc_banks[i].base_paddr_unaligned,
  2090. 0);
  2091. soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
  2092. }
  2093. }
  2094. }
  2095. #ifdef IPA_OFFLOAD
  2096. #define REO_DST_RING_SIZE_QCA6290 1023
  2097. #ifndef QCA_WIFI_QCA8074_VP
  2098. #define REO_DST_RING_SIZE_QCA8074 1023
  2099. #define REO_DST_RING_SIZE_QCN9000 2048
  2100. #else
  2101. #define REO_DST_RING_SIZE_QCA8074 8
  2102. #define REO_DST_RING_SIZE_QCN9000 8
  2103. #endif /* QCA_WIFI_QCA8074_VP */
  2104. #else
  2105. #define REO_DST_RING_SIZE_QCA6290 1024
  2106. #ifndef QCA_WIFI_QCA8074_VP
  2107. #define REO_DST_RING_SIZE_QCA8074 2048
  2108. #define REO_DST_RING_SIZE_QCN9000 2048
  2109. #else
  2110. #define REO_DST_RING_SIZE_QCA8074 8
  2111. #define REO_DST_RING_SIZE_QCN9000 8
  2112. #endif /* QCA_WIFI_QCA8074_VP */
  2113. #endif /* IPA_OFFLOAD */
  2114. #ifndef FEATURE_WDS
  2115. static void dp_soc_wds_attach(struct dp_soc *soc)
  2116. {
  2117. }
  2118. static void dp_soc_wds_detach(struct dp_soc *soc)
  2119. {
  2120. }
  2121. #endif
  2122. /*
2123. * dp_soc_reset_cpu_ring_map() - Reset CPU ring map
2124. * @soc: Datapath soc handle
2125. *
2126. * This API resets the default CPU ring map
  2127. */
  2128. static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
  2129. {
  2130. uint8_t i;
  2131. int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2132. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  2133. switch (nss_config) {
  2134. case dp_nss_cfg_first_radio:
  2135. /*
2136. * Setting Tx ring map when the first radio is nss offloaded
  2137. */
  2138. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
  2139. break;
  2140. case dp_nss_cfg_second_radio:
  2141. /*
2142. * Setting Tx ring map when the second radio is nss offloaded
  2143. */
  2144. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
  2145. break;
  2146. case dp_nss_cfg_dbdc:
  2147. /*
  2148. * Setting Tx ring map for 2 nss offloaded radios
  2149. */
  2150. soc->tx_ring_map[i] =
  2151. dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
  2152. break;
  2153. case dp_nss_cfg_dbtc:
  2154. /*
  2155. * Setting Tx ring map for 3 nss offloaded radios
  2156. */
  2157. soc->tx_ring_map[i] =
  2158. dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
  2159. break;
  2160. default:
  2161. dp_err("tx_ring_map failed due to invalid nss cfg");
  2162. break;
  2163. }
  2164. }
  2165. }
  2166. /*
  2167. * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2168. * @soc: DP soc handle
2169. * @ring_type: ring type
2170. * @ring_num: ring number
2171. *
2172. * Return: non-zero if the ring is NSS offloaded, 0 otherwise
  2173. */
  2174. static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
  2175. {
  2176. uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2177. uint8_t status = 0;
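/*
 * The nss_config value effectively doubles as a per-radio bitmap
 * (bit 0 = first radio, bit 1 = second radio), so for the per-radio ring
 * types below the ring is treated as NSS offloaded when the bit matching
 * its ring number is set.
 */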
  2178. switch (ring_type) {
  2179. case WBM2SW_RELEASE:
  2180. case REO_DST:
  2181. case RXDMA_BUF:
  2182. status = ((nss_config) & (1 << ring_num));
  2183. break;
  2184. default:
  2185. break;
  2186. }
  2187. return status;
  2188. }
  2189. /*
  2190. * dp_soc_disable_mac2_intr_mask() - reset interrupt mask for WMAC2 hw rings
2191. * @soc: DP SoC handle
2192. *
2193. * Return: void
  2194. */
  2195. static void dp_soc_disable_mac2_intr_mask(struct dp_soc *soc)
  2196. {
  2197. int *grp_mask = NULL;
  2198. int group_number;
  2199. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  2200. group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
  2201. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2202. group_number, 0x0);
  2203. grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
  2204. group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
  2205. wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
  2206. group_number, 0x0);
  2207. grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
  2208. group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
  2209. wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
  2210. group_number, 0x0);
  2211. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
  2212. group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
  2213. wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
  2214. group_number, 0x0);
  2215. }
  2216. /*
  2217. * dp_soc_reset_intr_mask() - reset interrupt mask
2218. * @soc: DP SoC handle
2219. *
2220. * Return: void
  2221. */
  2222. static void dp_soc_reset_intr_mask(struct dp_soc *soc)
  2223. {
  2224. uint8_t j;
  2225. int *grp_mask = NULL;
  2226. int group_number, mask, num_ring;
  2227. /* number of tx ring */
  2228. num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  2229. /*
  2230. * group mask for tx completion ring.
  2231. */
  2232. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  2233. /* loop and reset the mask for only offloaded ring */
  2234. for (j = 0; j < num_ring; j++) {
  2235. if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
  2236. continue;
  2237. }
  2238. /*
  2239. * Group number corresponding to tx offloaded ring.
  2240. */
  2241. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2242. if (group_number < 0) {
  2243. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2244. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2245. WBM2SW_RELEASE, j);
  2246. return;
  2247. }
  2248. /* reset the tx mask for offloaded ring */
  2249. mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2250. mask &= (~(1 << j));
  2251. /*
  2252. * reset the interrupt mask for offloaded ring.
  2253. */
  2254. wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2255. }
  2256. /* number of rx rings */
  2257. num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  2258. /*
  2259. * group mask for reo destination ring.
  2260. */
  2261. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  2262. /* loop and reset the mask for only offloaded ring */
  2263. for (j = 0; j < num_ring; j++) {
  2264. if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
  2265. continue;
  2266. }
  2267. /*
  2268. * Group number corresponding to rx offloaded ring.
  2269. */
  2270. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2271. if (group_number < 0) {
  2272. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2273. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2274. REO_DST, j);
  2275. return;
  2276. }
2277. /* clear the interrupt mask bit for the offloaded ring */
  2278. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
  2279. mask &= (~(1 << j));
  2280. /*
  2281. * set the interrupt mask to zero for rx offloaded radio.
  2282. */
  2283. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  2284. }
  2285. /*
  2286. * group mask for Rx buffer refill ring
  2287. */
  2288. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  2289. /* loop and reset the mask for only offloaded ring */
  2290. for (j = 0; j < MAX_PDEV_CNT; j++) {
  2291. if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
  2292. continue;
  2293. }
  2294. /*
  2295. * Group number corresponding to rx offloaded ring.
  2296. */
  2297. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  2298. if (group_number < 0) {
  2299. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2300. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  2301. REO_DST, j);
  2302. return;
  2303. }
2304. /* clear the interrupt mask bit for the offloaded ring */
  2305. mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2306. group_number);
  2307. mask &= (~(1 << j));
  2308. /*
  2309. * set the interrupt mask to zero for rx offloaded radio.
  2310. */
  2311. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  2312. group_number, mask);
  2313. }
  2314. }
  2315. #ifdef IPA_OFFLOAD
  2316. /**
2317. * dp_reo_remap_config() - configure reo remap register value based
2318. * on nss configuration.
2319. * Based on the offload_radio value, the remap configuration below
2320. * gets applied:
2321. * 0 - both radios handled by host (remap rings 1, 2, 3 & 4)
2322. * 1 - 1st radio handled by NSS (remap rings 2, 3 & 4)
2323. * 2 - 2nd radio handled by NSS (remap rings 1, 3 & 4)
2324. * 3 - both radios handled by NSS (remap not required)
2325. * 4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
  2326. *
  2327. * @remap1: output parameter indicates reo remap 1 register value
  2328. * @remap2: output parameter indicates reo remap 2 register value
  2329. * Return: bool type, true if remap is configured else false.
  2330. */
  2331. bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
  2332. {
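/*
 * HAL_REO_REMAP_IX2 entries fill REO destination indices 16-23 and
 * HAL_REO_REMAP_IX3 entries fill 24-31. With IPA offload enabled only
 * SW1-SW3 are rotated across here; REO2SW4 is left out, presumably because
 * that ring is reserved for the IPA Rx path.
 */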
  2333. *remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
  2334. HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
  2335. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
  2336. HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
  2337. HAL_REO_REMAP_IX2(REO_REMAP_SW2, 20) |
  2338. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 21) |
  2339. HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
  2340. HAL_REO_REMAP_IX2(REO_REMAP_SW2, 23);
  2341. *remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW3, 24) |
  2342. HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
  2343. HAL_REO_REMAP_IX3(REO_REMAP_SW2, 26) |
  2344. HAL_REO_REMAP_IX3(REO_REMAP_SW3, 27) |
  2345. HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
  2346. HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
  2347. HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
  2348. HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
  2349. dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
  2350. return true;
  2351. }
  2352. #else
  2353. static bool dp_reo_remap_config(struct dp_soc *soc,
  2354. uint32_t *remap1,
  2355. uint32_t *remap2)
  2356. {
  2357. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2358. uint8_t target_type;
  2359. target_type = hal_get_target_type(soc->hal_soc);
  2360. switch (offload_radio) {
  2361. case dp_nss_cfg_default:
  2362. *remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
  2363. HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
  2364. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
  2365. HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
  2366. HAL_REO_REMAP_IX2(REO_REMAP_SW1, 20) |
  2367. HAL_REO_REMAP_IX2(REO_REMAP_SW2, 21) |
  2368. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 22) |
  2369. HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);
  2370. *remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW1, 24) |
  2371. HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
  2372. HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
  2373. HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
  2374. HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
  2375. HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
  2376. HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
  2377. HAL_REO_REMAP_IX3(REO_REMAP_SW4, 31);
  2378. break;
  2379. case dp_nss_cfg_first_radio:
  2380. *remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW2, 16) |
  2381. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
  2382. HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
  2383. HAL_REO_REMAP_IX2(REO_REMAP_SW2, 19) |
  2384. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
  2385. HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
  2386. HAL_REO_REMAP_IX2(REO_REMAP_SW2, 22) |
  2387. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
  2388. *remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
  2389. HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
  2390. HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
  2391. HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
  2392. HAL_REO_REMAP_IX3(REO_REMAP_SW2, 28) |
  2393. HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
  2394. HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
  2395. HAL_REO_REMAP_IX3(REO_REMAP_SW2, 31);
  2396. break;
  2397. case dp_nss_cfg_second_radio:
  2398. *remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
  2399. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
  2400. HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
  2401. HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
  2402. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
  2403. HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
  2404. HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
  2405. HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
  2406. *remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
  2407. HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
  2408. HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
  2409. HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
  2410. HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
  2411. HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
  2412. HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
  2413. HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
  2414. break;
  2415. case dp_nss_cfg_dbdc:
  2416. case dp_nss_cfg_dbtc:
  2417. /* return false if both or all are offloaded to NSS */
  2418. return false;
  2419. }
  2420. dp_debug("remap1 %x remap2 %x offload_radio %u",
  2421. *remap1, *remap2, offload_radio);
  2422. return true;
  2423. }
  2424. #endif /* IPA_OFFLOAD */
  2425. /*
  2426. * dp_reo_frag_dst_set() - configure reo register to set the
  2427. * fragment destination ring
  2428. * @soc : Datapath soc
  2429. * @frag_dst_ring : output parameter to set fragment destination ring
  2430. *
2431. * Based on the offload_radio value, one of the fragment destination rings below is selected:
  2432. * 0 - TCL
  2433. * 1 - SW1
  2434. * 2 - SW2
  2435. * 3 - SW3
  2436. * 4 - SW4
  2437. * 5 - Release
  2438. * 6 - FW
  2439. * 7 - alternate select
  2440. *
  2441. * return: void
  2442. */
  2443. static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
  2444. {
  2445. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  2446. switch (offload_radio) {
  2447. case dp_nss_cfg_default:
  2448. *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
  2449. break;
  2450. case dp_nss_cfg_first_radio:
  2451. /*
2452. * This configuration is valid for a single-band radio which
2453. * is also NSS offloaded.
  2454. */
  2455. case dp_nss_cfg_dbdc:
  2456. case dp_nss_cfg_dbtc:
  2457. *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
  2458. break;
  2459. default:
  2460. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2461. FL("dp_reo_frag_dst_set invalid offload radio config"));
  2462. break;
  2463. }
  2464. }
  2465. #ifdef ENABLE_VERBOSE_DEBUG
  2466. static void dp_enable_verbose_debug(struct dp_soc *soc)
  2467. {
  2468. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2469. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2470. if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
  2471. is_dp_verbose_debug_enabled = true;
  2472. if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
  2473. hal_set_verbose_debug(true);
  2474. else
  2475. hal_set_verbose_debug(false);
  2476. }
  2477. #else
  2478. static void dp_enable_verbose_debug(struct dp_soc *soc)
  2479. {
  2480. }
  2481. #endif
  2482. #ifdef WLAN_FEATURE_STATS_EXT
  2483. static inline void dp_create_ext_stats_event(struct dp_soc *soc)
  2484. {
  2485. qdf_event_create(&soc->rx_hw_stats_event);
  2486. }
  2487. #else
  2488. static inline void dp_create_ext_stats_event(struct dp_soc *soc)
  2489. {
  2490. }
  2491. #endif
  2492. /*
2493. * dp_soc_cmn_setup() - Common SoC level initialization
2494. * @soc: Datapath SOC handle
2495. *
2496. * This is an internal function used to set up common SOC data structures,
  2497. * to be called from PDEV attach after receiving HW mode capabilities from FW
  2498. */
  2499. static int dp_soc_cmn_setup(struct dp_soc *soc)
  2500. {
  2501. int i, cached;
  2502. struct hal_reo_params reo_params;
  2503. int tx_ring_size;
  2504. int tx_comp_ring_size;
  2505. int reo_dst_ring_size;
  2506. uint32_t entries;
  2507. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2508. if (qdf_atomic_read(&soc->cmn_init_done))
  2509. return 0;
  2510. if (dp_hw_link_desc_pool_setup(soc))
  2511. goto fail1;
  2512. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2513. dp_enable_verbose_debug(soc);
  2514. /* Setup SRNG rings */
  2515. /* Common rings */
  2516. entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
  2517. if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
  2518. entries, 0)) {
  2519. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2520. FL("dp_srng_setup failed for wbm_desc_rel_ring"));
  2521. goto fail1;
  2522. }
  2523. qdf_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
  2524. soc->wbm_desc_rel_ring.alloc_size,
  2525. "wbm_desc_rel_ring");
  2526. soc->num_tcl_data_rings = 0;
  2527. /* Tx data rings */
  2528. if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
  2529. soc->num_tcl_data_rings =
  2530. wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
  2531. tx_comp_ring_size =
  2532. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  2533. tx_ring_size =
  2534. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  2535. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  2536. if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
  2537. TCL_DATA, i, 0, tx_ring_size, 0)) {
  2538. QDF_TRACE(QDF_MODULE_ID_DP,
  2539. QDF_TRACE_LEVEL_ERROR,
  2540. FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
  2541. goto fail1;
  2542. }
  2543. /* Disable cached desc if NSS offload is enabled */
  2544. cached = WLAN_CFG_DST_RING_CACHED_DESC;
  2545. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
  2546. cached = 0;
  2547. /*
  2548. * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
  2549. * count
  2550. */
  2551. if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
  2552. WBM2SW_RELEASE, i, 0,
  2553. tx_comp_ring_size,
  2554. cached)) {
  2555. QDF_TRACE(QDF_MODULE_ID_DP,
  2556. QDF_TRACE_LEVEL_ERROR,
  2557. FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
  2558. goto fail1;
  2559. }
  2560. }
  2561. } else {
  2562. /* This will be incremented during per pdev ring setup */
  2563. soc->num_tcl_data_rings = 0;
  2564. }
  2565. if (dp_tx_soc_attach(soc)) {
  2566. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2567. FL("dp_tx_soc_attach failed"));
  2568. goto fail1;
  2569. }
  2570. entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
  2571. /* TCL command and status rings */
  2572. if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
  2573. entries, 0)) {
  2574. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2575. FL("dp_srng_setup failed for tcl_cmd_ring"));
  2576. goto fail2;
  2577. }
  2578. entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
  2579. if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
  2580. entries, 0)) {
  2581. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2582. FL("dp_srng_setup failed for tcl_status_ring"));
  2583. goto fail2;
  2584. }
  2585. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  2586. /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
  2587. * descriptors
  2588. */
  2589. /* Rx data rings */
  2590. if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  2591. soc->num_reo_dest_rings =
  2592. wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
  2593. QDF_TRACE(QDF_MODULE_ID_DP,
  2594. QDF_TRACE_LEVEL_INFO,
  2595. FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
  2596. /* Disable cached desc if NSS offload is enabled */
  2597. cached = WLAN_CFG_DST_RING_CACHED_DESC;
  2598. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
  2599. cached = 0;
  2600. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  2601. if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
  2602. i, 0, reo_dst_ring_size, cached)) {
  2603. QDF_TRACE(QDF_MODULE_ID_DP,
  2604. QDF_TRACE_LEVEL_ERROR,
  2605. FL(RNG_ERR "reo_dest_ring [%d]"), i);
  2606. goto fail2;
  2607. }
  2608. }
  2609. } else {
  2610. /* This will be incremented during per pdev ring setup */
  2611. soc->num_reo_dest_rings = 0;
  2612. }
  2613. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  2614. /* LMAC RxDMA to SW Rings configuration */
  2615. if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
  2616. /* Only valid for MCL */
  2617. struct dp_pdev *pdev = soc->pdev_list[0];
  2618. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
  2619. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
  2620. RXDMA_DST, 0, i, entries, 0)) {
  2621. QDF_TRACE(QDF_MODULE_ID_DP,
  2622. QDF_TRACE_LEVEL_ERROR,
  2623. FL(RNG_ERR "rxdma_err_dst_ring"));
  2624. goto fail2;
  2625. }
  2626. }
  2627. }
  2628. /* TBD: call dp_rx_init to setup Rx SW descriptors */
  2629. /* REO reinjection ring */
  2630. entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
  2631. if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
  2632. entries, 0)) {
  2633. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2634. FL("dp_srng_setup failed for reo_reinject_ring"));
  2635. goto fail2;
  2636. }
  2637. /* Rx release ring */
  2638. if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
  2639. wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx),
  2640. 0)) {
  2641. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2642. FL("dp_srng_setup failed for rx_rel_ring"));
  2643. goto fail2;
  2644. }
  2645. /* Rx exception ring */
  2646. entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
  2647. if (dp_srng_setup(soc, &soc->reo_exception_ring,
  2648. REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries, 0)) {
  2649. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2650. FL("dp_srng_setup failed for reo_exception_ring"));
  2651. goto fail2;
  2652. }
  2653. /* REO command and status rings */
  2654. if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
  2655. wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx),
  2656. 0)) {
  2657. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2658. FL("dp_srng_setup failed for reo_cmd_ring"));
  2659. goto fail2;
  2660. }
  2661. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  2662. TAILQ_INIT(&soc->rx.reo_cmd_list);
  2663. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  2664. if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
  2665. wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx),
  2666. 0)) {
  2667. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2668. FL("dp_srng_setup failed for reo_status_ring"));
  2669. goto fail2;
  2670. }
  2671. /*
2672. * Skip registering hw ring interrupts for WMAC2 on IPQ6018,
2673. * since WMAC2 is not present on the IPQ6018 platform.
  2674. */
  2675. if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018) {
  2676. dp_soc_disable_mac2_intr_mask(soc);
  2677. }
  2678. /* Reset the cpu ring map if radio is NSS offloaded */
  2679. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
  2680. dp_soc_reset_cpu_ring_map(soc);
  2681. dp_soc_reset_intr_mask(soc);
  2682. }
  2683. /* Setup HW REO */
  2684. qdf_mem_zero(&reo_params, sizeof(reo_params));
  2685. if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
  2686. /*
  2687. * Reo ring remap is not required if both radios
  2688. * are offloaded to NSS
  2689. */
  2690. if (!dp_reo_remap_config(soc,
  2691. &reo_params.remap1,
  2692. &reo_params.remap2))
  2693. goto out;
  2694. reo_params.rx_hash_enabled = true;
  2695. }
  2696. /* setup the global rx defrag waitlist */
  2697. TAILQ_INIT(&soc->rx.defrag.waitlist);
  2698. soc->rx.defrag.timeout_ms =
  2699. wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
  2700. soc->rx.defrag.next_flush_ms = 0;
  2701. soc->rx.flags.defrag_timeout_check =
  2702. wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
  2703. qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
  2704. dp_create_ext_stats_event(soc);
  2705. out:
  2706. /*
  2707. * set the fragment destination ring
  2708. */
  2709. dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
  2710. hal_reo_setup(soc->hal_soc, &reo_params);
  2711. qdf_atomic_set(&soc->cmn_init_done, 1);
  2712. dp_soc_wds_attach(soc);
  2713. qdf_nbuf_queue_init(&soc->htt_stats.msg);
  2714. return 0;
  2715. fail2:
  2716. dp_tx_soc_detach(soc);
  2717. fail1:
  2718. /*
  2719. * Cleanup will be done as part of soc_detach, which will
  2720. * be called on pdev attach failure
  2721. */
  2722. return QDF_STATUS_E_FAILURE;
  2723. }
  2724. /*
2725. * dp_soc_cmn_cleanup() - Common SoC level de-initialization
  2726. *
  2727. * @soc: Datapath SOC handle
  2728. *
2729. * This function is responsible for cleaning up DP resources of the SoC
2730. * initialized in dp_pdev_attach_wifi3-->dp_soc_cmn_setup, since
2731. * dp_soc_detach_wifi3 cannot accurately determine whether some of
2732. * them have been initialized or not.
  2733. *
  2734. */
  2735. static void dp_soc_cmn_cleanup(struct dp_soc *soc)
  2736. {
  2737. if (!dp_is_soc_reinit(soc)) {
  2738. dp_tx_soc_detach(soc);
  2739. }
  2740. qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
  2741. dp_reo_cmdlist_destroy(soc);
  2742. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  2743. }
  2744. static QDF_STATUS
  2745. dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
  2746. int force);
  2747. static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2748. {
  2749. struct cdp_lro_hash_config lro_hash;
  2750. QDF_STATUS status;
  2751. if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  2752. !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
  2753. !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  2754. dp_err("LRO, GRO and RX hash disabled");
  2755. return QDF_STATUS_E_FAILURE;
  2756. }
  2757. qdf_mem_zero(&lro_hash, sizeof(lro_hash));
  2758. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
  2759. wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
  2760. lro_hash.lro_enable = 1;
  2761. lro_hash.tcp_flag = QDF_TCPHDR_ACK;
  2762. lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
  2763. QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
  2764. QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
  2765. }
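/*
 * The Toeplitz keys below are seeded with random bytes and sent to FW via
 * lro_hash_config; they are used to compute the Rx flow hash that drives
 * LRO/GRO flow grouping and, when rx hash is enabled, REO destination
 * selection.
 */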
  2766. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
  2767. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2768. LRO_IPV4_SEED_ARR_SZ));
  2769. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
  2770. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2771. LRO_IPV6_SEED_ARR_SZ));
  2772. qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
  2773. if (!soc->cdp_soc.ol_ops->lro_hash_config) {
  2774. QDF_BUG(0);
  2775. dp_err("lro_hash_config not configured");
  2776. return QDF_STATUS_E_FAILURE;
  2777. }
  2778. status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
  2779. pdev->pdev_id,
  2780. &lro_hash);
  2781. if (!QDF_IS_STATUS_SUCCESS(status)) {
  2782. dp_err("failed to send lro_hash_config to FW %u", status);
  2783. return status;
  2784. }
  2785. dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
  2786. lro_hash.lro_enable, lro_hash.tcp_flag,
  2787. lro_hash.tcp_flag_mask);
  2788. dp_info("toeplitz_hash_ipv4:");
  2789. qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2790. lro_hash.toeplitz_hash_ipv4,
  2791. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  2792. LRO_IPV4_SEED_ARR_SZ));
  2793. dp_info("toeplitz_hash_ipv6:");
  2794. qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2795. lro_hash.toeplitz_hash_ipv6,
  2796. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  2797. LRO_IPV6_SEED_ARR_SZ));
  2798. return status;
  2799. }
  2800. /*
  2801. * dp_rxdma_ring_setup() - configure the RX DMA rings
  2802. * @soc: data path SoC handle
  2803. * @pdev: Physical device handle
  2804. *
2805. * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
  2806. */
  2807. #ifdef QCA_HOST2FW_RXBUF_RING
  2808. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2809. struct dp_pdev *pdev)
  2810. {
  2811. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2812. int max_mac_rings;
  2813. int i;
  2814. int ring_size;
  2815. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2816. max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
  2817. ring_size = wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
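/*
 * With QCA_HOST2FW_RXBUF_RING, Rx buffers are posted through one
 * rx_mac_buf_ring per MAC (RXDMA_BUF ring index 1) so that FW, rather than
 * the host, feeds the actual RXDMA hardware ring; set up one such ring for
 * every configured MAC.
 */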
  2818. for (i = 0; i < max_mac_rings; i++) {
  2819. dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
  2820. if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
  2821. RXDMA_BUF, 1, i, ring_size, 0)) {
  2822. QDF_TRACE(QDF_MODULE_ID_DP,
  2823. QDF_TRACE_LEVEL_ERROR,
  2824. FL("failed rx mac ring setup"));
  2825. return QDF_STATUS_E_FAILURE;
  2826. }
  2827. }
  2828. return QDF_STATUS_SUCCESS;
  2829. }
  2830. #else
  2831. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  2832. struct dp_pdev *pdev)
  2833. {
  2834. return QDF_STATUS_SUCCESS;
  2835. }
  2836. #endif
  2837. /**
  2838. * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
  2839. * @pdev - DP_PDEV handle
  2840. *
  2841. * Return: void
  2842. */
  2843. static inline void
  2844. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  2845. {
  2846. uint8_t map_id;
  2847. struct dp_soc *soc = pdev->soc;
  2848. if (!soc)
  2849. return;
  2850. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  2851. qdf_mem_copy(pdev->dscp_tid_map[map_id],
  2852. default_dscp_tid_map,
  2853. sizeof(default_dscp_tid_map));
  2854. }
  2855. for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
  2856. hal_tx_set_dscp_tid_map(soc->hal_soc,
  2857. default_dscp_tid_map,
  2858. map_id);
  2859. }
  2860. }
  2861. /**
  2862. * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
  2863. * @pdev - DP_PDEV handle
  2864. *
  2865. * Return: void
  2866. */
  2867. static inline void
  2868. dp_pcp_tid_map_setup(struct dp_pdev *pdev)
  2869. {
  2870. struct dp_soc *soc = pdev->soc;
  2871. if (!soc)
  2872. return;
  2873. qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
  2874. sizeof(default_pcp_tid_map));
  2875. hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
  2876. }
  2877. #ifdef IPA_OFFLOAD
  2878. /**
  2879. * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
  2880. * @soc: data path instance
  2881. * @pdev: core txrx pdev context
  2882. *
  2883. * Return: QDF_STATUS_SUCCESS: success
2884. * QDF_STATUS_E_FAILURE: Error return
  2885. */
  2886. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2887. struct dp_pdev *pdev)
  2888. {
  2889. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  2890. int entries;
  2891. soc_cfg_ctx = soc->wlan_cfg_ctx;
  2892. entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
  2893. /* Setup second Rx refill buffer ring */
  2894. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2895. IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id, entries, 0)
  2896. ) {
  2897. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2898. FL("dp_srng_setup failed second rx refill ring"));
  2899. return QDF_STATUS_E_FAILURE;
  2900. }
  2901. return QDF_STATUS_SUCCESS;
  2902. }
  2903. /**
  2904. * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
  2905. * @soc: data path instance
  2906. * @pdev: core txrx pdev context
  2907. *
  2908. * Return: void
  2909. */
  2910. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2911. struct dp_pdev *pdev)
  2912. {
  2913. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
  2914. IPA_RX_REFILL_BUF_RING_IDX);
  2915. }
  2916. #else
  2917. static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2918. struct dp_pdev *pdev)
  2919. {
  2920. return QDF_STATUS_SUCCESS;
  2921. }
  2922. static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
  2923. struct dp_pdev *pdev)
  2924. {
  2925. }
  2926. #endif
  2927. #if !defined(DISABLE_MON_CONFIG)
  2928. /**
  2929. * dp_mon_rings_setup() - Initialize Monitor rings based on target
  2930. * @soc: soc handle
  2931. * @pdev: physical device handle
  2932. *
  2933. * Return: nonzero on failure and zero on success
  2934. */
  2935. static
  2936. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  2937. {
  2938. int mac_id = 0;
  2939. int pdev_id = pdev->pdev_id;
  2940. int entries;
  2941. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  2942. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  2943. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  2944. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
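/*
 * Targets with rxdma1_enable have a dedicated monitor RXDMA engine and need
 * the full set of monitor rings (buf/dst/status/desc) per MAC; other targets
 * only need the monitor status ring.
 */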
  2945. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  2946. entries =
  2947. wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
  2948. if (dp_srng_setup(soc,
  2949. &pdev->rxdma_mon_buf_ring[mac_id],
  2950. RXDMA_MONITOR_BUF, 0, mac_for_pdev,
  2951. entries, 0)) {
  2952. QDF_TRACE(QDF_MODULE_ID_DP,
  2953. QDF_TRACE_LEVEL_ERROR,
  2954. FL(RNG_ERR "rxdma_mon_buf_ring "));
  2955. return QDF_STATUS_E_NOMEM;
  2956. }
  2957. entries =
  2958. wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
  2959. if (dp_srng_setup(soc,
  2960. &pdev->rxdma_mon_dst_ring[mac_id],
  2961. RXDMA_MONITOR_DST, 0, mac_for_pdev,
  2962. entries, 0)) {
  2963. QDF_TRACE(QDF_MODULE_ID_DP,
  2964. QDF_TRACE_LEVEL_ERROR,
  2965. FL(RNG_ERR "rxdma_mon_dst_ring"));
  2966. return QDF_STATUS_E_NOMEM;
  2967. }
  2968. entries =
  2969. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2970. if (dp_srng_setup(soc,
  2971. &pdev->rxdma_mon_status_ring[mac_id],
  2972. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2973. entries, 0)) {
  2974. QDF_TRACE(QDF_MODULE_ID_DP,
  2975. QDF_TRACE_LEVEL_ERROR,
  2976. FL(RNG_ERR "rxdma_mon_status_ring"));
  2977. return QDF_STATUS_E_NOMEM;
  2978. }
  2979. entries =
  2980. wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
  2981. if (dp_srng_setup(soc,
  2982. &pdev->rxdma_mon_desc_ring[mac_id],
  2983. RXDMA_MONITOR_DESC, 0, mac_for_pdev,
  2984. entries, 0)) {
  2985. QDF_TRACE(QDF_MODULE_ID_DP,
  2986. QDF_TRACE_LEVEL_ERROR,
  2987. FL(RNG_ERR "rxdma_mon_desc_ring"));
  2988. return QDF_STATUS_E_NOMEM;
  2989. }
  2990. } else {
  2991. entries =
  2992. wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
  2993. if (dp_srng_setup(soc,
  2994. &pdev->rxdma_mon_status_ring[mac_id],
  2995. RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
  2996. entries, 0)) {
  2997. QDF_TRACE(QDF_MODULE_ID_DP,
  2998. QDF_TRACE_LEVEL_ERROR,
  2999. FL(RNG_ERR "rxdma_mon_status_ring"));
  3000. return QDF_STATUS_E_NOMEM;
  3001. }
  3002. }
  3003. }
  3004. return QDF_STATUS_SUCCESS;
  3005. }
  3006. #else
  3007. static
  3008. QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
  3009. {
  3010. return QDF_STATUS_SUCCESS;
  3011. }
  3012. #endif
3013. /* dp_iterate_update_peer_list() - update peer stats on cal client timer
  3014. * @pdev_hdl: pdev handle
  3015. */
  3016. #ifdef ATH_SUPPORT_EXT_STAT
  3017. void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
  3018. {
  3019. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  3020. struct dp_soc *soc = pdev->soc;
  3021. struct dp_vdev *vdev = NULL;
  3022. struct dp_peer *peer = NULL;
  3023. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  3024. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3025. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  3026. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  3027. dp_cal_client_update_peer_stats(&peer->stats);
  3028. }
  3029. }
  3030. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  3031. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  3032. }
  3033. #else
  3034. void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
  3035. {
  3036. }
  3037. #endif
  3038. /*
  3039. * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
  3040. * @pdev: Datapath PDEV handle
  3041. *
  3042. * Return: QDF_STATUS_SUCCESS: Success
  3043. * QDF_STATUS_E_NOMEM: Error
  3044. */
  3045. static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
  3046. {
  3047. pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
  3048. if (!pdev->ppdu_tlv_buf) {
  3049. QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
  3050. return QDF_STATUS_E_NOMEM;
  3051. }
  3052. return QDF_STATUS_SUCCESS;
  3053. }
  3054. /*
  3055. * dp_pdev_attach_wifi3() - attach txrx pdev
  3056. * @txrx_soc: Datapath SOC handle
  3057. * @htc_handle: HTC handle for host-target interface
  3058. * @qdf_osdev: QDF OS device
  3059. * @pdev_id: PDEV ID
  3060. *
  3061. * Return: DP PDEV handle on success, NULL on failure
  3062. */
  3063. static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
  3064. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
  3065. {
  3066. int ring_size;
  3067. int entries;
  3068. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  3069. int nss_cfg;
  3070. void *sojourn_buf;
  3071. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3072. struct dp_pdev *pdev = NULL;
  3073. if (dp_is_soc_reinit(soc)) {
  3074. pdev = soc->pdev_list[pdev_id];
  3075. } else {
  3076. pdev = qdf_mem_malloc(sizeof(*pdev));
  3077. qdf_minidump_log(pdev, sizeof(*pdev), "dp_pdev");
  3078. }
  3079. if (!pdev) {
  3080. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3081. FL("DP PDEV memory allocation failed"));
  3082. goto fail0;
  3083. }
  3084. /*
  3085. * Variable to prevent double pdev deinitialization during
3086. * radio detach execution, i.e. in the absence of any vdev.
  3087. */
  3088. pdev->pdev_deinit = 0;
  3089. pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
  3090. if (!pdev->invalid_peer) {
  3091. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3092. FL("Invalid peer memory allocation failed"));
  3093. qdf_mem_free(pdev);
  3094. goto fail0;
  3095. }
  3096. soc_cfg_ctx = soc->wlan_cfg_ctx;
  3097. pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
  3098. if (!pdev->wlan_cfg_ctx) {
  3099. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3100. FL("pdev cfg_attach failed"));
  3101. qdf_mem_free(pdev->invalid_peer);
  3102. qdf_mem_free(pdev);
  3103. goto fail0;
  3104. }
  3105. /*
  3106. * set nss pdev config based on soc config
  3107. */
  3108. nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
  3109. wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
  3110. (nss_cfg & (1 << pdev_id)));
  3111. pdev->soc = soc;
  3112. pdev->pdev_id = pdev_id;
  3113. soc->pdev_list[pdev_id] = pdev;
  3114. pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
  3115. soc->pdev_count++;
  3116. TAILQ_INIT(&pdev->vdev_list);
  3117. qdf_spinlock_create(&pdev->vdev_list_lock);
  3118. pdev->vdev_count = 0;
  3119. qdf_spinlock_create(&pdev->tx_mutex);
  3120. qdf_spinlock_create(&pdev->neighbour_peer_mutex);
  3121. TAILQ_INIT(&pdev->neighbour_peers_list);
  3122. pdev->neighbour_peers_added = false;
  3123. pdev->monitor_configured = false;
  3124. if (dp_soc_cmn_setup(soc)) {
  3125. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3126. FL("dp_soc_cmn_setup failed"));
  3127. goto fail1;
  3128. }
  3129. /* Setup per PDEV TCL rings if configured */
  3130. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3131. ring_size =
  3132. wlan_cfg_tx_ring_size(soc_cfg_ctx);
  3133. if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
  3134. pdev_id, pdev_id, ring_size, 0)) {
  3135. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3136. FL("dp_srng_setup failed for tcl_data_ring"));
  3137. goto fail1;
  3138. }
  3139. ring_size =
  3140. wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  3141. if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
  3142. WBM2SW_RELEASE, pdev_id, pdev_id,
  3143. ring_size, 0)) {
  3144. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3145. FL("dp_srng_setup failed for tx_comp_ring"));
  3146. goto fail1;
  3147. }
  3148. soc->num_tcl_data_rings++;
  3149. }
  3150. /* Tx specific init */
  3151. if (dp_tx_pdev_attach(pdev)) {
  3152. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3153. FL("dp_tx_pdev_attach failed"));
  3154. goto fail1;
  3155. }
  3156. ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
  3157. /* Setup per PDEV REO rings if configured */
  3158. if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
  3159. if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
  3160. pdev_id, pdev_id, ring_size, 0)) {
  3161. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3162. FL("dp_srng_setup failed for reo_dest_ringn"));
  3163. goto fail1;
  3164. }
  3165. soc->num_reo_dest_rings++;
  3166. }
  3167. ring_size =
  3168. wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc->wlan_cfg_ctx);
  3169. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
  3170. ring_size, 0)) {
  3171. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3172. FL("dp_srng_setup failed rx refill ring"));
  3173. goto fail1;
  3174. }
  3175. if (dp_rxdma_ring_setup(soc, pdev)) {
  3176. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3177. FL("RXDMA ring config failed"));
  3178. goto fail1;
  3179. }
  3180. if (dp_mon_rings_setup(soc, pdev)) {
  3181. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3182. FL("MONITOR rings setup failed"));
  3183. goto fail1;
  3184. }
  3185. entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
  3186. if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
  3187. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
  3188. 0, pdev_id, entries, 0)) {
  3189. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3190. FL(RNG_ERR "rxdma_err_dst_ring"));
  3191. goto fail1;
  3192. }
  3193. }
  3194. if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
  3195. goto fail1;
  3196. if (dp_ipa_ring_resource_setup(soc, pdev))
  3197. goto fail1;
  3198. if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
  3199. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3200. FL("dp_ipa_uc_attach failed"));
  3201. goto fail1;
  3202. }
  3203. /* Rx specific init */
  3204. if (dp_rx_pdev_attach(pdev)) {
  3205. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3206. FL("dp_rx_pdev_attach failed"));
  3207. goto fail2;
  3208. }
  3209. DP_STATS_INIT(pdev);
  3210. /* Monitor filter init */
  3211. pdev->mon_filter_mode = MON_FILTER_ALL;
  3212. pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
  3213. pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
  3214. pdev->fp_data_filter = FILTER_DATA_ALL;
  3215. pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
  3216. pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
  3217. pdev->mo_data_filter = FILTER_DATA_ALL;
  3218. dp_local_peer_id_pool_init(pdev);
  3219. dp_dscp_tid_map_setup(pdev);
  3220. dp_pcp_tid_map_setup(pdev);
  3221. /* Rx monitor mode specific init */
  3222. if (dp_rx_pdev_mon_attach(pdev)) {
  3223. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3224. "dp_rx_pdev_mon_attach failed");
  3225. goto fail2;
  3226. }
  3227. if (dp_wdi_event_attach(pdev)) {
  3228. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3229. "dp_wdi_evet_attach failed");
  3230. goto wdi_attach_fail;
  3231. }
  3232. /* set the reo destination during initialization */
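/*
 * reo_dest is 1-based (REO2SW1..REO2SW4), so pdev_id + 1 maps pdev 0 to REO
 * destination ring 1 by default.
 */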
  3233. pdev->reo_dest = pdev->pdev_id + 1;
  3234. /*
  3235. * initialize ppdu tlv list
  3236. */
  3237. TAILQ_INIT(&pdev->ppdu_info_list);
  3238. pdev->tlv_count = 0;
  3239. pdev->list_depth = 0;
  3240. qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
  3241. pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
  3242. sizeof(struct cdp_tx_sojourn_stats), 0, 4,
  3243. TRUE);
  3244. if (pdev->sojourn_buf) {
  3245. sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
  3246. qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
  3247. }
3248. /* initialize cal client timer */
  3249. dp_cal_client_attach(&pdev->cal_client_ctx,
  3250. dp_pdev_to_cdp_pdev(pdev),
  3251. pdev->soc->osdev,
  3252. &dp_iterate_update_peer_list);
  3253. qdf_event_create(&pdev->fw_peer_stats_event);
  3254. pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
  3255. dp_init_tso_stats(pdev);
  3256. if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
  3257. goto fail1;
  3258. dp_tx_ppdu_stats_attach(pdev);
  3259. return (struct cdp_pdev *)pdev;
  3260. wdi_attach_fail:
  3261. /*
  3262. * dp_mon_link_desc_pool_cleanup is done in dp_pdev_detach
3263. * and hence need not be done here.
  3264. */
  3265. dp_rx_pdev_mon_detach(pdev);
  3266. fail2:
  3267. dp_rx_pdev_detach(pdev);
  3268. fail1:
  3269. if (pdev->invalid_peer)
  3270. qdf_mem_free(pdev->invalid_peer);
  3271. dp_pdev_detach((struct cdp_pdev *)pdev, 0);
  3272. fail0:
  3273. return NULL;
  3274. }
  3275. /*
3276. * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
  3277. * @soc: data path SoC handle
  3278. * @pdev: Physical device handle
  3279. *
  3280. * Return: void
  3281. */
  3282. #ifdef QCA_HOST2FW_RXBUF_RING
  3283. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  3284. struct dp_pdev *pdev)
  3285. {
  3286. int i;
  3287. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  3288. dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
  3289. RXDMA_BUF, 1);
  3290. if (soc->reap_timer_init) {
  3291. qdf_timer_free(&soc->mon_reap_timer);
  3292. soc->reap_timer_init = 0;
  3293. }
  3294. }
  3295. #else
  3296. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  3297. struct dp_pdev *pdev)
  3298. {
  3299. }
  3300. #endif
  3301. /*
  3302. * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
  3303. * @pdev: device object
  3304. *
  3305. * Return: void
  3306. */
  3307. static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  3308. {
  3309. struct dp_neighbour_peer *peer = NULL;
  3310. struct dp_neighbour_peer *temp_peer = NULL;
  3311. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  3312. neighbour_peer_list_elem, temp_peer) {
  3313. /* delete this peer from the list */
  3314. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  3315. peer, neighbour_peer_list_elem);
  3316. qdf_mem_free(peer);
  3317. }
  3318. qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
  3319. }
  3320. /**
  3321. * dp_htt_ppdu_stats_detach() - detach stats resources
  3322. * @pdev: Datapath PDEV handle
  3323. *
  3324. * Return: void
  3325. */
  3326. static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
  3327. {
  3328. struct ppdu_info *ppdu_info, *ppdu_info_next;
  3329. TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
  3330. ppdu_info_list_elem, ppdu_info_next) {
  3331. if (!ppdu_info)
  3332. break;
  3333. qdf_assert_always(ppdu_info->nbuf);
  3334. qdf_nbuf_free(ppdu_info->nbuf);
  3335. qdf_mem_free(ppdu_info);
  3336. }
  3337. if (pdev->ppdu_tlv_buf)
  3338. qdf_mem_free(pdev->ppdu_tlv_buf);
  3339. }
  3340. #if !defined(DISABLE_MON_CONFIG)
  3341. static
  3342. void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  3343. int mac_id)
  3344. {
  3345. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  3346. dp_srng_cleanup(soc,
  3347. &pdev->rxdma_mon_buf_ring[mac_id],
  3348. RXDMA_MONITOR_BUF, 0);
  3349. dp_srng_cleanup(soc,
  3350. &pdev->rxdma_mon_dst_ring[mac_id],
  3351. RXDMA_MONITOR_DST, 0);
  3352. dp_srng_cleanup(soc,
  3353. &pdev->rxdma_mon_status_ring[mac_id],
  3354. RXDMA_MONITOR_STATUS, 0);
  3355. dp_srng_cleanup(soc,
  3356. &pdev->rxdma_mon_desc_ring[mac_id],
  3357. RXDMA_MONITOR_DESC, 0);
  3358. dp_srng_cleanup(soc,
  3359. &pdev->rxdma_err_dst_ring[mac_id],
  3360. RXDMA_DST, 0);
  3361. } else {
  3362. dp_srng_cleanup(soc,
  3363. &pdev->rxdma_mon_status_ring[mac_id],
  3364. RXDMA_MONITOR_STATUS, 0);
  3365. dp_srng_cleanup(soc,
  3366. &pdev->rxdma_err_dst_ring[mac_id],
  3367. RXDMA_DST, 0);
  3368. }
  3369. }
  3370. #else
  3371. static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
  3372. int mac_id)
  3373. {
  3374. }
  3375. #endif
  3376. /**
  3377. * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
  3378. *
  3379. * @soc: soc handle
  3380. * @pdev: datapath physical dev handle
  3381. * @mac_id: mac number
  3382. *
  3383. * Return: None
  3384. */
  3385. static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
  3386. int mac_id)
  3387. {
  3388. }
  3389. /**
  3390. * dp_pdev_mem_reset() - Reset txrx pdev memory
  3391. * @pdev: dp pdev handle
  3392. *
  3393. * Return: None
  3394. */
static void dp_pdev_mem_reset(struct dp_pdev *pdev)
{
	uint16_t len = 0;
	uint8_t *dp_pdev_offset = (uint8_t *)pdev;

	len = sizeof(struct dp_pdev) -
	      offsetof(struct dp_pdev, pdev_deinit) -
	      sizeof(pdev->pdev_deinit);
	dp_pdev_offset = dp_pdev_offset +
			 offsetof(struct dp_pdev, pdev_deinit) +
			 sizeof(pdev->pdev_deinit);

	qdf_mem_zero(dp_pdev_offset, len);
}
  3407. #ifdef WLAN_DP_PENDING_MEM_FLUSH
/**
 * dp_pdev_flush_pending_vdevs() - Flush all delete-pending vdevs in pdev
 * @pdev: Datapath PDEV handle
 *
 * This is the last chance to flush all pending dp vdevs/peers.
 * Peer/vdev leak cases, such as a non-SSR teardown with missing peer
 * unmap events, are covered here.
 *
 * Return: None
 */
  3418. static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
  3419. {
  3420. struct dp_vdev *vdev = NULL;
  3421. while (true) {
  3422. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  3423. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  3424. if (vdev->delete.pending)
  3425. break;
  3426. }
  3427. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
		/*
		 * The vdev will be freed once all of its peers are cleaned
		 * up; dp_delete_pending_vdev() removes the vdev from the
		 * pdev's vdev_list.
		 */
  3433. if (vdev)
  3434. dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
  3435. else
  3436. break;
  3437. }
  3438. }
  3439. #else
  3440. static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
  3441. {
  3442. }
  3443. #endif
  3444. /**
  3445. * dp_pdev_deinit() - Deinit txrx pdev
  3446. * @txrx_pdev: Datapath PDEV handle
  3447. * @force: Force deinit
  3448. *
  3449. * Return: None
  3450. */
  3451. static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
  3452. {
  3453. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3454. struct dp_soc *soc = pdev->soc;
  3455. qdf_nbuf_t curr_nbuf, next_nbuf;
  3456. int mac_id;
	/*
	 * Prevent double pdev deinitialization during radio detach
	 * execution, i.e. in the absence of any vdev.
	 */
  3461. if (pdev->pdev_deinit)
  3462. return;
  3463. pdev->pdev_deinit = 1;
  3464. dp_wdi_event_detach(pdev);
  3465. dp_pdev_flush_pending_vdevs(pdev);
  3466. dp_tx_pdev_detach(pdev);
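	/*
	 * When TX rings are configured per pdev, the TCL data and WBM
	 * completion rings owned by this pdev are deinitialized here;
	 * otherwise the common TX rings are handled in dp_soc_deinit().
	 */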
  3467. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3468. dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3469. TCL_DATA, pdev->pdev_id);
  3470. dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3471. WBM2SW_RELEASE, pdev->pdev_id);
  3472. }
  3473. dp_pktlogmod_exit(pdev);
  3474. dp_rx_fst_detach(soc, pdev);
  3475. dp_rx_pdev_detach(pdev);
  3476. dp_rx_pdev_mon_detach(pdev);
  3477. dp_neighbour_peers_detach(pdev);
  3478. qdf_spinlock_destroy(&pdev->tx_mutex);
  3479. qdf_spinlock_destroy(&pdev->vdev_list_lock);
  3480. dp_ipa_uc_detach(soc, pdev);
  3481. dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
  3482. /* Cleanup per PDEV REO rings if configured */
  3483. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3484. dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3485. REO_DST, pdev->pdev_id);
  3486. }
  3487. dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3488. dp_rxdma_ring_cleanup(soc, pdev);
  3489. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3490. dp_mon_ring_deinit(soc, pdev, mac_id);
  3491. dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3492. RXDMA_DST, 0);
  3493. }
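	/* Free any MSDUs still queued for invalid peers on this pdev */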
  3494. curr_nbuf = pdev->invalid_peer_head_msdu;
  3495. while (curr_nbuf) {
  3496. next_nbuf = qdf_nbuf_next(curr_nbuf);
  3497. qdf_nbuf_free(curr_nbuf);
  3498. curr_nbuf = next_nbuf;
  3499. }
  3500. pdev->invalid_peer_head_msdu = NULL;
  3501. pdev->invalid_peer_tail_msdu = NULL;
  3502. dp_htt_ppdu_stats_detach(pdev);
  3503. dp_tx_ppdu_stats_detach(pdev);
  3504. qdf_nbuf_free(pdev->sojourn_buf);
  3505. qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
  3506. dp_cal_client_detach(&pdev->cal_client_ctx);
  3507. soc->pdev_count--;
  3508. /* only do soc common cleanup when last pdev do detach */
  3509. if (!(soc->pdev_count))
  3510. dp_soc_cmn_cleanup(soc);
  3511. wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
  3512. if (pdev->invalid_peer)
  3513. qdf_mem_free(pdev->invalid_peer);
  3514. qdf_mem_free(pdev->dp_txrx_handle);
  3515. dp_pdev_mem_reset(pdev);
  3516. }
  3517. /**
  3518. * dp_pdev_deinit_wifi3() - Deinit txrx pdev
  3519. * @psoc: Datapath psoc handle
  3520. * @pdev_id: Id of datapath PDEV handle
  3521. * @force: Force deinit
  3522. *
  3523. * Return: QDF_STATUS
  3524. */
  3525. static QDF_STATUS
  3526. dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
  3527. int force)
  3528. {
  3529. struct dp_soc *soc = (struct dp_soc *)psoc;
  3530. struct dp_pdev *txrx_pdev =
  3531. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
  3532. pdev_id);
  3533. if (!txrx_pdev)
  3534. return QDF_STATUS_E_FAILURE;
  3535. soc->dp_soc_reinit = TRUE;
  3536. dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
  3537. return QDF_STATUS_SUCCESS;
  3538. }
  3539. /*
  3540. * dp_pdev_detach() - Complete rest of pdev detach
  3541. * @txrx_pdev: Datapath PDEV handle
  3542. * @force: Force deinit
  3543. *
  3544. * Return: None
  3545. */
  3546. static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
  3547. {
  3548. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  3549. struct dp_soc *soc = pdev->soc;
  3550. struct rx_desc_pool *rx_desc_pool;
  3551. int mac_id, mac_for_pdev;
  3552. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3553. dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
  3554. TCL_DATA, pdev->pdev_id);
  3555. dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
  3556. WBM2SW_RELEASE, pdev->pdev_id);
  3557. }
  3558. dp_mon_link_free(pdev);
  3559. /* Cleanup per PDEV REO rings if configured */
  3560. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3561. dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
  3562. REO_DST, pdev->pdev_id);
  3563. }
  3564. dp_rxdma_ring_cleanup(soc, pdev);
  3565. wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
  3566. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  3567. dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
  3568. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3569. dp_mon_ring_cleanup(soc, pdev, mac_id);
  3570. dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
  3571. RXDMA_DST, 0);
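		/*
		 * When the SoC is going through re-init, the per-MAC status
		 * and monitor RX descriptor pools are freed here as part of
		 * the full detach.
		 */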
  3572. if (dp_is_soc_reinit(soc)) {
  3573. mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  3574. pdev->pdev_id);
  3575. rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
  3576. dp_rx_desc_pool_free(soc, rx_desc_pool);
  3577. rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
  3578. dp_rx_desc_pool_free(soc, rx_desc_pool);
  3579. }
  3580. }
  3581. if (dp_is_soc_reinit(soc)) {
  3582. rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
  3583. dp_rx_desc_pool_free(soc, rx_desc_pool);
  3584. }
  3585. soc->pdev_list[pdev->pdev_id] = NULL;
  3586. qdf_minidump_remove(pdev);
  3587. qdf_mem_free(pdev);
  3588. }
  3589. /*
  3590. * dp_pdev_detach_wifi3() - detach txrx pdev
  3591. * @psoc: Datapath soc handle
  3592. * @pdev_id: pdev id of pdev
  3593. * @force: Force detach
  3594. *
  3595. * Return: QDF_STATUS
  3596. */
  3597. static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
  3598. int force)
  3599. {
  3600. struct dp_soc *soc = (struct dp_soc *)psoc;
  3601. struct dp_pdev *txrx_pdev =
  3602. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
  3603. pdev_id);
  3604. if (!txrx_pdev) {
  3605. dp_err("Couldn't find dp pdev");
  3606. return QDF_STATUS_E_FAILURE;
  3607. }
  3608. if (dp_is_soc_reinit(soc)) {
  3609. dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force);
  3610. } else {
  3611. dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
  3612. dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force);
  3613. }
  3614. return QDF_STATUS_SUCCESS;
  3615. }
  3616. /*
  3617. * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  3618. * @soc: DP SOC handle
  3619. */
  3620. static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  3621. {
  3622. struct reo_desc_list_node *desc;
  3623. struct dp_rx_tid *rx_tid;
  3624. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  3625. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  3626. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  3627. rx_tid = &desc->rx_tid;
  3628. qdf_mem_unmap_nbytes_single(soc->osdev,
  3629. rx_tid->hw_qdesc_paddr,
  3630. QDF_DMA_BIDIRECTIONAL,
  3631. rx_tid->hw_qdesc_alloc_size);
  3632. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  3633. qdf_mem_free(desc);
  3634. }
  3635. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  3636. qdf_list_destroy(&soc->reo_desc_freelist);
  3637. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  3638. }
/**
 * dp_soc_mem_reset() - Reset DP SoC memory
 * @soc: DP handle
 *
 * Zero out every field of struct dp_soc that follows the dp_soc_reinit
 * flag, leaving the flag itself and the fields before it intact.
 *
 * Return: None
 */
static void dp_soc_mem_reset(struct dp_soc *soc)
{
	uint16_t len = 0;
	uint8_t *dp_soc_offset = (uint8_t *)soc;

	len = sizeof(struct dp_soc) -
	      offsetof(struct dp_soc, dp_soc_reinit) -
	      sizeof(soc->dp_soc_reinit);
	dp_soc_offset = dp_soc_offset +
			offsetof(struct dp_soc, dp_soc_reinit) +
			sizeof(soc->dp_soc_reinit);

	qdf_mem_zero(dp_soc_offset, len);
}
  3657. /**
  3658. * dp_soc_deinit() - Deinitialize txrx SOC
  3659. * @txrx_soc: Opaque DP SOC handle
  3660. *
  3661. * Return: None
  3662. */
  3663. static void dp_soc_deinit(void *txrx_soc)
  3664. {
  3665. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3666. int i;
  3667. qdf_atomic_set(&soc->cmn_init_done, 0);
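	/* Deinit all attached pdevs before tearing down the common SoC rings */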
  3668. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3669. if (soc->pdev_list[i])
  3670. dp_pdev_deinit((struct cdp_pdev *)
  3671. soc->pdev_list[i], 1);
  3672. }
  3673. qdf_flush_work(&soc->htt_stats.work);
  3674. qdf_disable_work(&soc->htt_stats.work);
  3675. /* Free pending htt stats messages */
  3676. qdf_nbuf_queue_free(&soc->htt_stats.msg);
  3677. dp_peer_find_detach(soc);
  3678. /* Free the ring memories */
  3679. /* Common rings */
  3680. dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3681. /* Tx data rings */
  3682. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3683. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3684. dp_srng_deinit(soc, &soc->tcl_data_ring[i],
  3685. TCL_DATA, i);
  3686. dp_srng_deinit(soc, &soc->tx_comp_ring[i],
  3687. WBM2SW_RELEASE, i);
  3688. }
  3689. }
  3690. /* TCL command and status rings */
  3691. dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3692. dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3693. /* Rx data rings */
  3694. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3695. soc->num_reo_dest_rings =
  3696. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3697. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3698. /* TODO: Get number of rings and ring sizes
  3699. * from wlan_cfg
  3700. */
  3701. dp_srng_deinit(soc, &soc->reo_dest_ring[i],
  3702. REO_DST, i);
  3703. }
  3704. }
  3705. /* REO reinjection ring */
  3706. dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3707. /* Rx release ring */
  3708. dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3709. /* Rx exception ring */
  3710. /* TODO: Better to store ring_type and ring_num in
  3711. * dp_srng during setup
  3712. */
  3713. dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3714. /* REO command and status rings */
  3715. dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3716. dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3717. dp_soc_wds_detach(soc);
  3718. qdf_spinlock_destroy(&soc->peer_ref_mutex);
  3719. qdf_spinlock_destroy(&soc->htt_stats.lock);
  3720. htt_soc_htc_dealloc(soc->htt_handle);
  3721. dp_reo_desc_freelist_destroy(soc);
  3722. qdf_spinlock_destroy(&soc->ast_lock);
  3723. dp_soc_mem_reset(soc);
  3724. }
  3725. /**
  3726. * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
  3727. * @txrx_soc: Opaque DP SOC handle
  3728. *
  3729. * Return: None
  3730. */
  3731. static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
  3732. {
  3733. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
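	/*
	 * Mark the SoC as re-initializing so that the subsequent detach
	 * calls skip the deinit steps performed here.
	 */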
  3734. soc->dp_soc_reinit = 1;
  3735. dp_soc_deinit(txrx_soc);
  3736. }
  3737. /*
  3738. * dp_soc_detach() - Detach rest of txrx SOC
  3739. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3740. *
  3741. * Return: None
  3742. */
  3743. static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
  3744. {
  3745. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3746. int i;
  3747. qdf_atomic_set(&soc->cmn_init_done, 0);
  3748. /* TBD: Call Tx and Rx cleanup functions to free buffers and
  3749. * SW descriptors
  3750. */
  3751. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3752. if (soc->pdev_list[i])
  3753. dp_pdev_detach((struct cdp_pdev *)
  3754. soc->pdev_list[i], 1);
  3755. }
  3756. /* Free the ring memories */
  3757. /* Common rings */
  3758. qdf_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
  3759. dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3760. if (dp_is_soc_reinit(soc)) {
  3761. dp_tx_soc_detach(soc);
  3762. }
  3763. /* Tx data rings */
  3764. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  3765. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3766. dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
  3767. TCL_DATA, i);
  3768. dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
  3769. WBM2SW_RELEASE, i);
  3770. }
  3771. }
  3772. /* TCL command and status rings */
  3773. dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  3774. dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3775. /* Rx data rings */
  3776. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  3777. soc->num_reo_dest_rings =
  3778. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3779. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3780. /* TODO: Get number of rings and ring sizes
  3781. * from wlan_cfg
  3782. */
  3783. dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
  3784. REO_DST, i);
  3785. }
  3786. }
  3787. /* REO reinjection ring */
  3788. dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3789. /* Rx release ring */
  3790. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3791. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3);
  3792. /* Rx exception ring */
  3793. /* TODO: Better to store ring_type and ring_num in
  3794. * dp_srng during setup
  3795. */
  3796. dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3797. /* REO command and status rings */
  3798. dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3799. dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3800. dp_hw_link_desc_pool_cleanup(soc);
  3801. htt_soc_detach(soc->htt_handle);
  3802. soc->dp_soc_reinit = 0;
  3803. wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
  3804. qdf_minidump_remove(soc);
  3805. qdf_mem_free(soc);
  3806. }
  3807. /*
  3808. * dp_soc_detach_wifi3() - Detach txrx SOC
  3809. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  3810. *
  3811. * Return: None
  3812. */
  3813. static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
  3814. {
  3815. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  3816. if (dp_is_soc_reinit(soc)) {
  3817. dp_soc_detach(txrx_soc);
  3818. } else {
  3819. dp_soc_deinit(txrx_soc);
  3820. dp_soc_detach(txrx_soc);
  3821. }
  3822. }
  3823. #if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
 * @soc: soc handle
 * @pdev: physical device handle
 * @mac_id: MAC index within the pdev
 * @mac_for_pdev: MAC id resolved for this pdev
 *
 * Return: non-zero for failure, zero for success
 */
  3833. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3834. struct dp_pdev *pdev,
  3835. int mac_id,
  3836. int mac_for_pdev)
  3837. {
  3838. QDF_STATUS status = QDF_STATUS_SUCCESS;
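	/*
	 * When RXDMA1 is enabled, the full set of monitor rings
	 * (buf/dst/status/desc) is programmed via HTT; otherwise only the
	 * monitor status ring is set up.
	 */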
  3839. if (soc->wlan_cfg_ctx->rxdma1_enable) {
  3840. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3841. pdev->rxdma_mon_buf_ring[mac_id]
  3842. .hal_srng,
  3843. RXDMA_MONITOR_BUF);
  3844. if (status != QDF_STATUS_SUCCESS) {
  3845. dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
  3846. return status;
  3847. }
  3848. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3849. pdev->rxdma_mon_dst_ring[mac_id]
  3850. .hal_srng,
  3851. RXDMA_MONITOR_DST);
  3852. if (status != QDF_STATUS_SUCCESS) {
  3853. dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
  3854. return status;
  3855. }
  3856. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3857. pdev->rxdma_mon_status_ring[mac_id]
  3858. .hal_srng,
  3859. RXDMA_MONITOR_STATUS);
  3860. if (status != QDF_STATUS_SUCCESS) {
  3861. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3862. return status;
  3863. }
  3864. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3865. pdev->rxdma_mon_desc_ring[mac_id]
  3866. .hal_srng,
  3867. RXDMA_MONITOR_DESC);
  3868. if (status != QDF_STATUS_SUCCESS) {
  3869. dp_err("Failed to send htt srng message for Rxdma mon desc ring");
  3870. return status;
  3871. }
  3872. } else {
  3873. status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3874. pdev->rxdma_mon_status_ring[mac_id]
  3875. .hal_srng,
  3876. RXDMA_MONITOR_STATUS);
  3877. if (status != QDF_STATUS_SUCCESS) {
  3878. dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
  3879. return status;
  3880. }
  3881. }
  3882. return status;
  3883. }
  3884. #else
  3885. static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
  3886. struct dp_pdev *pdev,
  3887. int mac_id,
  3888. int mac_for_pdev)
  3889. {
  3890. return QDF_STATUS_SUCCESS;
  3891. }
  3892. #endif
/*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 *
 * This function is used to configure the MAC rings.
 * On MCL, the host provides buffers in the Host2FW ring; FW refills
 * (copies) buffers into the ring and updates the ring_idx register.
 *
 * @soc: data path SoC handle
 *
 * Return: zero on success, non-zero on failure
 */
  3905. #ifdef QCA_HOST2FW_RXBUF_RING
  3906. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3907. {
  3908. int i;
  3909. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3910. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3911. struct dp_pdev *pdev = soc->pdev_list[i];
  3912. if (pdev) {
  3913. int mac_id;
  3914. bool dbs_enable = 0;
  3915. int max_mac_rings =
  3916. wlan_cfg_get_num_mac_rings
  3917. (pdev->wlan_cfg_ctx);
  3918. htt_srng_setup(soc->htt_handle, 0,
  3919. pdev->rx_refill_buf_ring.hal_srng,
  3920. RXDMA_BUF);
  3921. if (pdev->rx_refill_buf_ring2.hal_srng)
  3922. htt_srng_setup(soc->htt_handle, 0,
  3923. pdev->rx_refill_buf_ring2.hal_srng,
  3924. RXDMA_BUF);
  3925. if (soc->cdp_soc.ol_ops->
  3926. is_hw_dbs_2x2_capable) {
  3927. dbs_enable = soc->cdp_soc.ol_ops->
  3928. is_hw_dbs_2x2_capable(
  3929. (void *)soc->ctrl_psoc);
  3930. }
  3931. if (dbs_enable) {
  3932. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3933. QDF_TRACE_LEVEL_ERROR,
  3934. FL("DBS enabled max_mac_rings %d"),
  3935. max_mac_rings);
  3936. } else {
  3937. max_mac_rings = 1;
  3938. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3939. QDF_TRACE_LEVEL_ERROR,
  3940. FL("DBS disabled, max_mac_rings %d"),
  3941. max_mac_rings);
  3942. }
  3943. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3944. FL("pdev_id %d max_mac_rings %d"),
  3945. pdev->pdev_id, max_mac_rings);
  3946. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  3947. int mac_for_pdev = dp_get_mac_id_for_pdev(
  3948. mac_id, pdev->pdev_id);
  3949. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3950. QDF_TRACE_LEVEL_ERROR,
  3951. FL("mac_id %d"), mac_for_pdev);
  3952. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3953. pdev->rx_mac_buf_ring[mac_id]
  3954. .hal_srng,
  3955. RXDMA_BUF);
  3956. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3957. pdev->rxdma_err_dst_ring[mac_id]
  3958. .hal_srng,
  3959. RXDMA_DST);
  3960. /* Configure monitor mode rings */
  3961. status = dp_mon_htt_srng_setup(soc, pdev,
  3962. mac_id,
  3963. mac_for_pdev);
  3964. if (status != QDF_STATUS_SUCCESS) {
  3965. dp_err("Failed to send htt monitor messages to target");
  3966. return status;
  3967. }
  3968. }
  3969. }
  3970. }
  3971. /*
  3972. * Timer to reap rxdma status rings.
  3973. * Needed until we enable ppdu end interrupts
  3974. */
  3975. qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
  3976. dp_service_mon_rings, (void *)soc,
  3977. QDF_TIMER_TYPE_WAKE_APPS);
  3978. soc->reap_timer_init = 1;
  3979. return status;
  3980. }
  3981. #else
  3982. /* This is only for WIN */
  3983. static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
  3984. {
  3985. int i;
  3986. int mac_id;
  3987. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3988. for (i = 0; i < MAX_PDEV_CNT; i++) {
  3989. struct dp_pdev *pdev = soc->pdev_list[i];
  3990. if (!pdev)
  3991. continue;
  3992. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  3993. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
  3994. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3995. pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
  3996. #ifndef DISABLE_MON_CONFIG
  3997. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  3998. pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
  3999. RXDMA_MONITOR_BUF);
  4000. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  4001. pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
  4002. RXDMA_MONITOR_DST);
  4003. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  4004. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  4005. RXDMA_MONITOR_STATUS);
  4006. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  4007. pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
  4008. RXDMA_MONITOR_DESC);
  4009. #endif
  4010. htt_srng_setup(soc->htt_handle, mac_for_pdev,
  4011. pdev->rxdma_err_dst_ring[mac_id].hal_srng,
  4012. RXDMA_DST);
  4013. }
  4014. }
  4015. return status;
  4016. }
  4017. #endif
  4018. #ifdef NO_RX_PKT_HDR_TLV
  4019. static QDF_STATUS
  4020. dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
  4021. {
  4022. int i;
  4023. int mac_id;
  4024. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  4025. QDF_STATUS status = QDF_STATUS_SUCCESS;
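	/*
	 * Subscribe to the per-MPDU/MSDU TLVs but not the RX packet header
	 * TLV; the offsets below tell the target where each TLV is placed
	 * when the packet header is not included.
	 */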
  4026. htt_tlv_filter.mpdu_start = 1;
  4027. htt_tlv_filter.msdu_start = 1;
  4028. htt_tlv_filter.mpdu_end = 1;
  4029. htt_tlv_filter.msdu_end = 1;
  4030. htt_tlv_filter.attention = 1;
  4031. htt_tlv_filter.packet = 1;
  4032. htt_tlv_filter.packet_header = 0;
  4033. htt_tlv_filter.ppdu_start = 0;
  4034. htt_tlv_filter.ppdu_end = 0;
  4035. htt_tlv_filter.ppdu_end_user_stats = 0;
  4036. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  4037. htt_tlv_filter.ppdu_end_status_done = 0;
  4038. htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
  4041. htt_tlv_filter.enable_mo = 0;
  4042. htt_tlv_filter.fp_mgmt_filter = 0;
  4043. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
  4044. htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
  4045. FILTER_DATA_MCAST |
  4046. FILTER_DATA_DATA);
  4047. htt_tlv_filter.mo_mgmt_filter = 0;
  4048. htt_tlv_filter.mo_ctrl_filter = 0;
  4049. htt_tlv_filter.mo_data_filter = 0;
  4050. htt_tlv_filter.md_data_filter = 0;
  4051. htt_tlv_filter.offset_valid = true;
  4052. htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
	/* Not subscribing to the rx_pkt_header TLV */
  4054. htt_tlv_filter.rx_header_offset = 0;
  4055. htt_tlv_filter.rx_mpdu_start_offset =
  4056. HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
  4057. htt_tlv_filter.rx_mpdu_end_offset =
  4058. HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc);
  4059. htt_tlv_filter.rx_msdu_start_offset =
  4060. HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc);
  4061. htt_tlv_filter.rx_msdu_end_offset =
  4062. HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc);
  4063. htt_tlv_filter.rx_attn_offset =
  4064. HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc);
  4065. for (i = 0; i < MAX_PDEV_CNT; i++) {
  4066. struct dp_pdev *pdev = soc->pdev_list[i];
  4067. if (!pdev)
  4068. continue;
  4069. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  4070. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  4071. pdev->pdev_id);
  4072. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  4073. pdev->rx_refill_buf_ring.hal_srng,
  4074. RXDMA_BUF, RX_BUFFER_SIZE,
  4075. &htt_tlv_filter);
  4076. }
  4077. }
  4078. return status;
  4079. }
  4080. #else
  4081. static QDF_STATUS
  4082. dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
  4083. {
  4084. return QDF_STATUS_SUCCESS;
  4085. }
  4086. #endif
  4087. /*
  4088. * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
  4089. *
  4090. * This function is used to configure the FSE HW block in RX OLE on a
  4091. * per pdev basis. Here, we will be programming parameters related to
  4092. * the Flow Search Table.
  4093. *
  4094. * @soc: data path SoC handle
  4095. *
  4096. * Return: zero on success, non-zero on failure
  4097. */
  4098. #ifdef WLAN_SUPPORT_RX_FLOW_TAG
  4099. static QDF_STATUS
  4100. dp_rx_target_fst_config(struct dp_soc *soc)
  4101. {
  4102. int i;
  4103. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4104. for (i = 0; i < MAX_PDEV_CNT; i++) {
  4105. struct dp_pdev *pdev = soc->pdev_list[i];
  4106. /* Flow search is not enabled if NSS offload is enabled */
  4107. if (pdev &&
  4108. !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
  4109. status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
  4110. if (status != QDF_STATUS_SUCCESS)
  4111. break;
  4112. }
  4113. }
  4114. return status;
  4115. }
  4116. #else
  4117. /**
  4118. * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
  4119. * @soc: SoC handle
  4120. *
  4121. * Return: Success
  4122. */
  4123. static inline QDF_STATUS
  4124. dp_rx_target_fst_config(struct dp_soc *soc)
  4125. {
  4126. return QDF_STATUS_SUCCESS;
  4127. }
  4128. #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
  4129. /*
  4130. * dp_soc_attach_target_wifi3() - SOC initialization in the target
  4131. * @cdp_soc: Opaque Datapath SOC handle
  4132. *
  4133. * Return: zero on success, non-zero on failure
  4134. */
  4135. static QDF_STATUS
  4136. dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
  4137. {
  4138. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  4139. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4140. htt_soc_attach_target(soc->htt_handle);
  4141. status = dp_rxdma_ring_config(soc);
  4142. if (status != QDF_STATUS_SUCCESS) {
  4143. dp_err("Failed to send htt srng setup messages to target");
  4144. return status;
  4145. }
  4146. status = dp_rxdma_ring_sel_cfg(soc);
  4147. if (status != QDF_STATUS_SUCCESS) {
  4148. dp_err("Failed to send htt ring config message to target");
  4149. return status;
  4150. }
  4151. status = dp_rx_target_fst_config(soc);
  4152. if (status != QDF_STATUS_SUCCESS) {
  4153. dp_err("Failed to send htt fst setup config message to target");
  4154. return status;
  4155. }
  4156. DP_STATS_INIT(soc);
  4157. /* initialize work queue for stats processing */
  4158. qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
  4159. qdf_minidump_log(soc, sizeof(*soc), "dp_soc");
  4160. return QDF_STATUS_SUCCESS;
  4161. }
  4162. /*
  4163. * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
  4164. * @txrx_soc: Datapath SOC handle
  4165. */
  4166. static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
  4167. {
  4168. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  4169. return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
  4170. }
  4171. /*
  4172. * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
  4173. * @txrx_soc: Datapath SOC handle
  4174. * @nss_cfg: nss config
  4175. */
  4176. static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
  4177. {
  4178. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  4179. struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
  4180. wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
  4181. /*
  4182. * TODO: masked out based on the per offloaded radio
  4183. */
  4184. switch (config) {
  4185. case dp_nss_cfg_default:
  4186. break;
  4187. case dp_nss_cfg_first_radio:
  4188. /*
  4189. * This configuration is valid for single band radio which
  4190. * is also NSS offload.
  4191. */
  4192. case dp_nss_cfg_dbdc:
  4193. case dp_nss_cfg_dbtc:
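		/*
		 * With NSS offload handling the data path, host TX
		 * descriptor pools are not needed, so size them to zero.
		 */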
  4194. wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
  4195. wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
  4196. wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
  4197. wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
  4198. break;
  4199. default:
  4200. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4201. "Invalid offload config %d", config);
  4202. }
  4203. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4204. FL("nss-wifi<0> nss config is enabled"));
  4205. }
/*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @cdp_soc: Datapath SOC handle
 * @pdev_id: PDEV ID of the PDEV on which the vdev is to be created
 * @vdev_mac_addr: MAC address of the virtual interface
 * @vdev_id: VDEV Id
 * @op_mode: VDEV operating mode
 * @subtype: VDEV operating subtype
 *
 * Return: DP VDEV handle on success, NULL on failure
 */
  4216. static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
  4217. uint8_t pdev_id,
  4218. uint8_t *vdev_mac_addr,
  4219. uint8_t vdev_id,
  4220. enum wlan_op_mode op_mode,
  4221. enum wlan_op_subtype subtype)
  4222. {
  4223. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  4224. struct dp_pdev *pdev =
  4225. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  4226. pdev_id);
  4227. struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
  4228. if (!pdev) {
  4229. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4230. FL("DP PDEV is Null for pdev id %d"), pdev_id);
  4231. qdf_mem_free(vdev);
  4232. goto fail0;
  4233. }
  4234. if (!vdev) {
  4235. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4236. FL("DP VDEV memory allocation failed"));
  4237. goto fail0;
  4238. }
  4239. vdev->pdev = pdev;
  4240. vdev->vdev_id = vdev_id;
  4241. vdev->opmode = op_mode;
  4242. vdev->subtype = subtype;
  4243. vdev->osdev = soc->osdev;
  4244. vdev->osif_rx = NULL;
  4245. vdev->osif_rsim_rx_decap = NULL;
  4246. vdev->osif_get_key = NULL;
  4247. vdev->osif_rx_mon = NULL;
  4248. vdev->osif_tx_free_ext = NULL;
  4249. vdev->osif_vdev = NULL;
  4250. vdev->delete.pending = 0;
  4251. vdev->safemode = 0;
  4252. vdev->drop_unenc = 1;
  4253. vdev->sec_type = cdp_sec_type_none;
  4254. #ifdef notyet
  4255. vdev->filters_num = 0;
  4256. #endif
  4257. qdf_mem_copy(
  4258. &vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
  4259. /* TODO: Initialize default HTT meta data that will be used in
  4260. * TCL descriptors for packets transmitted from this VDEV
  4261. */
  4262. TAILQ_INIT(&vdev->peer_list);
  4263. dp_peer_multipass_list_init(vdev);
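	/*
	 * In interrupt-poll mode, kick the poll timer when the first vdev
	 * on the pdev (or a monitor vdev) is created.
	 */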
  4264. if ((soc->intr_mode == DP_INTR_POLL) &&
  4265. wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
  4266. if ((pdev->vdev_count == 0) ||
  4267. (wlan_op_mode_monitor == vdev->opmode))
  4268. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  4269. }
  4270. soc->vdev_id_map[vdev_id] = vdev;
  4271. if (wlan_op_mode_monitor == vdev->opmode) {
  4272. pdev->monitor_vdev = vdev;
  4273. return (struct cdp_vdev *)vdev;
  4274. }
  4275. vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  4276. vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  4277. vdev->dscp_tid_map_id = 0;
  4278. vdev->mcast_enhancement_en = 0;
  4279. vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
  4280. vdev->prev_tx_enq_tstamp = 0;
  4281. vdev->prev_rx_deliver_tstamp = 0;
  4282. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4283. /* add this vdev into the pdev's list */
  4284. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  4285. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4286. pdev->vdev_count++;
  4287. if (wlan_op_mode_sta != vdev->opmode)
  4288. vdev->ap_bridge_enabled = true;
  4289. else
  4290. vdev->ap_bridge_enabled = false;
  4291. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4292. "%s: wlan_cfg_ap_bridge_enabled %d",
  4293. __func__, vdev->ap_bridge_enabled);
  4294. dp_tx_vdev_attach(vdev);
  4295. if (pdev->vdev_count == 1)
  4296. dp_lro_hash_setup(soc, pdev);
  4297. dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
  4298. DP_STATS_INIT(vdev);
  4299. if (wlan_op_mode_sta == vdev->opmode)
  4300. dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
  4301. vdev->mac_addr.raw);
  4302. return (struct cdp_vdev *)vdev;
  4303. fail0:
  4304. return NULL;
  4305. }
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @soc: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
 */
  4315. static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc,
  4316. uint8_t vdev_id,
  4317. ol_osif_vdev_handle osif_vdev,
  4318. struct ol_txrx_ops *txrx_ops)
  4319. {
  4320. struct dp_vdev *vdev =
  4321. dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
  4322. vdev_id);
  4323. if (!vdev)
  4324. return QDF_STATUS_E_FAILURE;
  4325. vdev->osif_vdev = osif_vdev;
  4326. vdev->osif_rx = txrx_ops->rx.rx;
  4327. vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
  4328. vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
  4329. vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
  4330. vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
  4331. vdev->osif_get_key = txrx_ops->get_key;
  4332. vdev->osif_rx_mon = txrx_ops->rx.mon;
  4333. vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
  4334. vdev->tx_comp = txrx_ops->tx.tx_comp;
  4335. #ifdef notyet
  4336. #if ATH_SUPPORT_WAPI
  4337. vdev->osif_check_wai = txrx_ops->rx.wai_check;
  4338. #endif
  4339. #endif
  4340. #ifdef UMAC_SUPPORT_PROXY_ARP
  4341. vdev->osif_proxy_arp = txrx_ops->proxy_arp;
  4342. #endif
  4343. vdev->me_convert = txrx_ops->me_convert;
  4344. /* TODO: Enable the following once Tx code is integrated */
  4345. if (vdev->mesh_vdev)
  4346. txrx_ops->tx.tx = dp_tx_send_mesh;
  4347. else
  4348. txrx_ops->tx.tx = dp_tx_send;
  4349. txrx_ops->tx.tx_exception = dp_tx_send_exception;
  4350. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  4351. "DP Vdev Register success");
  4352. return QDF_STATUS_SUCCESS;
  4353. }
/**
 * dp_peer_flush_ast_entry() - Forcibly flush all AST entries of a peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @peer_id: Peer ID
 * @vdev_id: Vdev ID
 *
 * Return: void
 */
  4363. static void dp_peer_flush_ast_entry(struct dp_soc *soc,
  4364. struct dp_peer *peer,
  4365. uint16_t peer_id,
  4366. uint8_t vdev_id)
  4367. {
  4368. struct dp_ast_entry *ase, *tmp_ase;
  4369. if (soc->is_peer_map_unmap_v2) {
  4370. DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
  4371. dp_rx_peer_unmap_handler
  4372. (soc, peer_id,
  4373. vdev_id,
  4374. ase->mac_addr.raw,
  4375. 1);
  4376. }
  4377. }
  4378. }
/**
 * dp_vdev_flush_peers() - Forcibly flush peers of a vdev
 * @vdev_handle: Datapath VDEV handle
 * @unmap_only: Flag to indicate "only unmap"
 *
 * Return: void
 */
  4386. static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
  4387. {
  4388. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4389. struct dp_pdev *pdev = vdev->pdev;
  4390. struct dp_soc *soc = pdev->soc;
  4391. struct dp_peer *peer;
  4392. uint16_t *peer_ids;
  4393. struct dp_peer **peer_array = NULL;
  4394. uint8_t i = 0, j = 0;
  4395. uint8_t m = 0, n = 0;
  4396. peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
  4397. if (!peer_ids) {
  4398. dp_err("DP alloc failure - unable to flush peers");
  4399. return;
  4400. }
  4401. if (!unmap_only) {
  4402. peer_array = qdf_mem_malloc(
  4403. soc->max_peers * sizeof(struct dp_peer *));
  4404. if (!peer_array) {
  4405. qdf_mem_free(peer_ids);
  4406. dp_err("DP alloc failure - unable to flush peers");
  4407. return;
  4408. }
  4409. }
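	/*
	 * Collect the peer pointers and peer ids under peer_ref_mutex;
	 * the actual delete/unmap is done after the lock is released.
	 */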
  4410. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4411. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  4412. if (!unmap_only && n < soc->max_peers)
  4413. peer_array[n++] = peer;
  4414. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  4415. if (peer->peer_ids[i] != HTT_INVALID_PEER)
  4416. if (j < soc->max_peers)
  4417. peer_ids[j++] = peer->peer_ids[i];
  4418. }
  4419. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	/*
	 * Even when a peer id is invalid, the peer still needs to be
	 * flushed if its valid flag is set; this handles the NAN + SSR
	 * case.
	 */
  4424. if (!unmap_only) {
  4425. for (m = 0; m < n ; m++) {
  4426. peer = peer_array[m];
  4427. dp_info("peer: %pM is getting deleted",
  4428. peer->mac_addr.raw);
  4429. /* only if peer valid is true */
  4430. if (peer->valid)
  4431. dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
  4432. vdev->vdev_id,
  4433. peer->mac_addr.raw, 0);
  4434. }
  4435. qdf_mem_free(peer_array);
  4436. }
  4437. for (i = 0; i < j ; i++) {
  4438. peer = __dp_peer_find_by_id(soc, peer_ids[i]);
  4439. if (!peer)
  4440. continue;
  4441. dp_info("peer: %pM is getting unmap",
  4442. peer->mac_addr.raw);
  4443. /* free AST entries of peer */
  4444. dp_peer_flush_ast_entry(soc, peer,
  4445. peer_ids[i],
  4446. vdev->vdev_id);
  4447. dp_rx_peer_unmap_handler(soc, peer_ids[i],
  4448. vdev->vdev_id,
  4449. peer->mac_addr.raw, 0);
  4450. }
  4451. qdf_mem_free(peer_ids);
  4452. dp_info("Flushed peers for vdev object %pK ", vdev);
  4453. }
  4454. /*
  4455. * dp_vdev_detach_wifi3() - Detach txrx vdev
  4456. * @cdp_soc: Datapath soc handle
  4457. * @vdev_id: VDEV Id
  4458. * @callback: Callback OL_IF on completion of detach
  4459. * @cb_context: Callback context
  4460. *
  4461. */
  4462. static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
  4463. uint8_t vdev_id,
  4464. ol_txrx_vdev_delete_cb callback,
  4465. void *cb_context)
  4466. {
  4467. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  4468. struct dp_pdev *pdev;
  4469. struct dp_neighbour_peer *peer = NULL;
  4470. struct dp_neighbour_peer *temp_peer = NULL;
  4471. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  4472. if (!vdev)
  4473. return QDF_STATUS_E_FAILURE;
  4474. pdev = vdev->pdev;
  4475. soc->vdev_id_map[vdev->vdev_id] = NULL;
  4476. if (wlan_op_mode_sta == vdev->opmode)
  4477. dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
  4478. vdev->vap_self_peer->mac_addr.raw, 0);
  4479. /*
  4480. * If Target is hung, flush all peers before detaching vdev
  4481. * this will free all references held due to missing
  4482. * unmap commands from Target
  4483. */
  4484. if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
  4485. dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
  4486. /*
  4487. * Use peer_ref_mutex while accessing peer_list, in case
  4488. * a peer is in the process of being removed from the list.
  4489. */
  4490. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4491. /* check that the vdev has no peers allocated */
  4492. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  4493. /* debug print - will be removed later */
  4494. dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
  4495. vdev, vdev->mac_addr.raw);
  4496. /* indicate that the vdev needs to be deleted */
  4497. vdev->delete.pending = 1;
  4498. vdev->delete.callback = callback;
  4499. vdev->delete.context = cb_context;
  4500. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4501. return QDF_STATUS_E_FAILURE;
  4502. }
  4503. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4504. if (wlan_op_mode_monitor == vdev->opmode)
  4505. goto free_vdev;
  4506. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  4507. if (!soc->hw_nac_monitor_support) {
  4508. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  4509. neighbour_peer_list_elem) {
  4510. QDF_ASSERT(peer->vdev != vdev);
  4511. }
  4512. } else {
  4513. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  4514. neighbour_peer_list_elem, temp_peer) {
  4515. if (peer->vdev == vdev) {
  4516. TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
  4517. neighbour_peer_list_elem);
  4518. qdf_mem_free(peer);
  4519. }
  4520. }
  4521. }
  4522. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  4523. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  4524. dp_tx_vdev_detach(vdev);
  4525. dp_rx_vdev_detach(vdev);
  4526. /* remove the vdev from its parent pdev's list */
  4527. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  4528. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  4529. free_vdev:
  4530. if (wlan_op_mode_monitor == vdev->opmode)
  4531. pdev->monitor_vdev = NULL;
  4532. dp_info("deleting vdev object %pK (%pM)", vdev, vdev->mac_addr.raw);
  4533. qdf_mem_free(vdev);
  4534. if (callback)
  4535. callback(cb_context);
  4536. return QDF_STATUS_SUCCESS;
  4537. }
  4538. #ifdef FEATURE_AST
  4539. /*
  4540. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  4541. * @soc - datapath soc handle
  4542. * @peer - datapath peer handle
  4543. *
  4544. * Delete the AST entries belonging to a peer
  4545. */
  4546. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  4547. struct dp_peer *peer)
  4548. {
  4549. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  4550. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  4551. dp_peer_del_ast(soc, ast_entry);
  4552. peer->self_ast_entry = NULL;
  4553. }
  4554. #else
  4555. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  4556. struct dp_peer *peer)
  4557. {
  4558. }
  4559. #endif
  4560. #if ATH_SUPPORT_WRAP
  4561. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  4562. uint8_t *peer_mac_addr)
  4563. {
  4564. struct dp_peer *peer;
  4565. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  4566. 0, vdev->vdev_id);
  4567. if (!peer)
  4568. return NULL;
  4569. if (peer->bss_peer)
  4570. return peer;
  4571. dp_peer_unref_delete(peer);
  4572. return NULL;
  4573. }
  4574. #else
  4575. static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
  4576. uint8_t *peer_mac_addr)
  4577. {
  4578. struct dp_peer *peer;
  4579. peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
  4580. 0, vdev->vdev_id);
  4581. if (!peer)
  4582. return NULL;
  4583. if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
  4584. return peer;
  4585. dp_peer_unref_delete(peer);
  4586. return NULL;
  4587. }
  4588. #endif
  4589. #ifdef FEATURE_AST
  4590. static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
  4591. struct dp_pdev *pdev,
  4592. uint8_t *peer_mac_addr)
  4593. {
  4594. struct dp_ast_entry *ast_entry;
  4595. qdf_spin_lock_bh(&soc->ast_lock);
  4596. if (soc->ast_override_support)
  4597. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
  4598. pdev->pdev_id);
  4599. else
  4600. ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
  4601. if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
  4602. dp_peer_del_ast(soc, ast_entry);
  4603. qdf_spin_unlock_bh(&soc->ast_lock);
  4604. }
  4605. #endif
  4606. #ifdef PEER_CACHE_RX_PKTS
  4607. static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
  4608. {
  4609. qdf_spinlock_create(&peer->bufq_info.bufq_lock);
  4610. peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
  4611. qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
  4612. }
  4613. #else
  4614. static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
  4615. {
  4616. }
  4617. #endif
  4618. #ifdef WLAN_FEATURE_STATS_EXT
  4619. /*
  4620. * dp_set_ignore_reo_status_cb() - set ignore reo status cb flag
  4621. * @soc: dp soc handle
  4622. * @flag: flag to set or reset
  4623. *
  4624. * Return: None
  4625. */
  4626. static inline void dp_set_ignore_reo_status_cb(struct dp_soc *soc,
  4627. bool flag)
  4628. {
  4629. soc->ignore_reo_status_cb = flag;
  4630. }
  4631. #else
  4632. static inline void dp_set_ignore_reo_status_cb(struct dp_soc *soc,
  4633. bool flag)
  4634. {
  4635. }
  4636. #endif
/*
 * dp_peer_create_wifi3() - attach txrx peer
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @peer_mac_addr: Peer MAC address
 *
 * Return: DP peer handle on success, NULL on failure
 */
  4645. static void *dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  4646. uint8_t *peer_mac_addr)
  4647. {
  4648. struct dp_peer *peer;
  4649. int i;
  4650. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  4651. struct dp_pdev *pdev;
  4652. struct cdp_peer_cookie peer_cookie;
  4653. enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
  4654. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  4655. if (!vdev || !peer_mac_addr)
  4656. return NULL;
  4657. pdev = vdev->pdev;
  4658. soc = pdev->soc;
  4659. /*
  4660. * If a peer entry with given MAC address already exists,
  4661. * reuse the peer and reset the state of peer.
  4662. */
  4663. peer = dp_peer_can_reuse(vdev, peer_mac_addr);
  4664. if (peer) {
  4665. qdf_atomic_init(&peer->is_default_route_set);
  4666. dp_peer_cleanup(vdev, peer, true);
  4667. qdf_spin_lock_bh(&soc->ast_lock);
  4668. dp_peer_delete_ast_entries(soc, peer);
  4669. peer->delete_in_progress = false;
  4670. qdf_spin_unlock_bh(&soc->ast_lock);
  4671. if ((vdev->opmode == wlan_op_mode_sta) &&
  4672. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  4673. QDF_MAC_ADDR_SIZE)) {
  4674. ast_type = CDP_TXRX_AST_TYPE_SELF;
  4675. }
  4676. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  4677. /*
  4678. * Control path maintains a node count which is incremented
  4679. * for every new peer create command. Since new peer is not being
  4680. * created and earlier reference is reused here,
  4681. * peer_unref_delete event is sent to control path to
  4682. * increment the count back.
  4683. */
  4684. if (soc->cdp_soc.ol_ops->peer_unref_delete) {
  4685. soc->cdp_soc.ol_ops->peer_unref_delete(
  4686. soc->ctrl_psoc,
  4687. pdev->pdev_id,
  4688. peer->mac_addr.raw, vdev->mac_addr.raw,
  4689. vdev->opmode);
  4690. }
  4691. dp_local_peer_id_alloc(pdev, peer);
  4692. qdf_spinlock_create(&peer->peer_info_lock);
  4693. dp_peer_rx_bufq_resources_init(peer);
  4694. DP_STATS_INIT(peer);
  4695. DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
  4696. return (void *)peer;
  4697. } else {
		/*
		 * When a STA roams from a repeater AP to the root AP and
		 * vice versa, we need to remove the AST entry that was
		 * earlier added as a WDS entry.
		 * If an AST entry exists but no peer entry exists for a
		 * given MAC address, we can deduce that it is a WDS entry.
		 */
  4705. dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
  4706. }
  4707. #ifdef notyet
  4708. peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
  4709. soc->mempool_ol_ath_peer);
  4710. #else
  4711. peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
  4712. #endif
  4713. if (!peer)
  4714. return NULL; /* failure */
  4715. qdf_mem_zero(peer, sizeof(struct dp_peer));
  4716. TAILQ_INIT(&peer->ast_entry_list);
  4717. /* store provided params */
  4718. peer->vdev = vdev;
  4719. if ((vdev->opmode == wlan_op_mode_sta) &&
  4720. !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
  4721. QDF_MAC_ADDR_SIZE)) {
  4722. ast_type = CDP_TXRX_AST_TYPE_SELF;
  4723. }
  4724. dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
  4725. qdf_spinlock_create(&peer->peer_info_lock);
  4726. dp_peer_rx_bufq_resources_init(peer);
  4727. qdf_mem_copy(
  4728. &peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
  4729. /* initialize the peer_id */
  4730. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  4731. peer->peer_ids[i] = HTT_INVALID_PEER;
  4732. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  4733. qdf_atomic_init(&peer->ref_cnt);
  4734. /* keep one reference for attach */
  4735. qdf_atomic_inc(&peer->ref_cnt);
  4736. /* add this peer into the vdev's list */
  4737. if (wlan_op_mode_sta == vdev->opmode)
  4738. TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
  4739. else
  4740. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  4741. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  4742. /* TODO: See if hash based search is required */
  4743. dp_peer_find_hash_add(soc, peer);
  4744. /* Initialize the peer state */
  4745. peer->state = OL_TXRX_PEER_STATE_DISC;
  4746. dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
  4747. vdev, peer, peer->mac_addr.raw,
  4748. qdf_atomic_read(&peer->ref_cnt));
	/*
	 * For every peer map message, check whether this peer is the
	 * bss_peer and mark it accordingly.
	 */
  4752. if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
  4753. QDF_MAC_ADDR_SIZE) == 0 &&
  4754. (wlan_op_mode_sta != vdev->opmode)) {
  4755. dp_info("vdev bss_peer!!");
  4756. peer->bss_peer = 1;
  4757. vdev->vap_bss_peer = peer;
  4758. }
  4759. if (wlan_op_mode_sta == vdev->opmode &&
  4760. qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
  4761. QDF_MAC_ADDR_SIZE) == 0) {
  4762. vdev->vap_self_peer = peer;
  4763. }
  4764. if (wlan_op_mode_sta == vdev->opmode &&
  4765. qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
  4766. QDF_MAC_ADDR_SIZE) != 0) {
  4767. dp_set_ignore_reo_status_cb(soc, false);
  4768. }
  4769. for (i = 0; i < DP_MAX_TIDS; i++)
  4770. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  4771. peer->valid = 1;
  4772. dp_local_peer_id_alloc(pdev, peer);
  4773. DP_STATS_INIT(peer);
  4774. DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
  4775. qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
  4776. QDF_MAC_ADDR_SIZE);
  4777. peer_cookie.ctx = NULL;
  4778. peer_cookie.pdev_id = pdev->pdev_id;
  4779. peer_cookie.cookie = pdev->next_peer_cookie++;
  4780. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  4781. dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
  4782. (void *)&peer_cookie,
  4783. peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id);
  4784. #endif
  4785. if (soc->wlanstats_enabled) {
  4786. if (!peer_cookie.ctx) {
  4787. pdev->next_peer_cookie--;
  4788. qdf_err("Failed to initialize peer rate stats");
  4789. } else {
  4790. peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
  4791. peer_cookie.ctx;
  4792. }
  4793. }
  4794. return (void *)peer;
  4795. }
  4796. /*
  4797. * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
  4798. * @vdev: Datapath VDEV handle
  4799. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4800. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4801. *
  4802. * Return: None
  4803. */
  4804. static
  4805. void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
  4806. enum cdp_host_reo_dest_ring *reo_dest,
  4807. bool *hash_based)
  4808. {
  4809. struct dp_soc *soc;
  4810. struct dp_pdev *pdev;
  4811. pdev = vdev->pdev;
  4812. soc = pdev->soc;
  4813. /*
  4814. * hash based steering is disabled for Radios which are offloaded
  4815. * to NSS
  4816. */
  4817. if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
  4818. *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
  4819. /*
  4820. * Below line of code will ensure the proper reo_dest ring is chosen
  4821. * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
  4822. */
  4823. *reo_dest = pdev->reo_dest;
  4824. }
  4825. #ifdef IPA_OFFLOAD
  4826. /**
  4827. * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
  4828. * @vdev: Virtual device
  4829. *
  4830. * Return: true if the vdev is of subtype P2P
  4831. * false if the vdev is of any other subtype
  4832. */
  4833. static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
  4834. {
  4835. if (vdev->subtype == wlan_op_subtype_p2p_device ||
  4836. vdev->subtype == wlan_op_subtype_p2p_cli ||
  4837. vdev->subtype == wlan_op_subtype_p2p_go)
  4838. return true;
  4839. return false;
  4840. }
  4841. /*
  4842. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4843. * @vdev: Datapath VDEV handle
  4844. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4845. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4846. *
  4847. * If IPA is enabled in ini, for SAP mode, disable hash based
  4848. * steering, use default reo_dst ring for RX. Use config values for other modes.
  4849. * Return: None
  4850. */
  4851. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4852. enum cdp_host_reo_dest_ring *reo_dest,
  4853. bool *hash_based)
  4854. {
  4855. struct dp_soc *soc;
  4856. struct dp_pdev *pdev;
  4857. pdev = vdev->pdev;
  4858. soc = pdev->soc;
  4859. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4860. /* For P2P-GO interfaces we do not need to change the REO
  4861. * configuration even if IPA config is enabled
  4862. */
  4863. if (dp_is_vdev_subtype_p2p(vdev))
  4864. return;
  4865. /*
  4866. * If IPA is enabled, disable hash-based flow steering and set
  4867. * reo_dest_ring_4 as the REO ring to receive packets on.
  4868. * IPA is configured to reap reo_dest_ring_4.
  4869. *
  4870. * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
  4871. * value enum value is from 1 - 4.
  4872. * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
  4873. */
  4874. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  4875. if (vdev->opmode == wlan_op_mode_ap) {
  4876. *reo_dest = IPA_REO_DEST_RING_IDX + 1;
  4877. *hash_based = 0;
  4878. } else if (vdev->opmode == wlan_op_mode_sta &&
  4879. dp_ipa_is_mdm_platform()) {
  4880. *reo_dest = IPA_REO_DEST_RING_IDX + 1;
  4881. }
  4882. }
  4883. }
  4884. #else
  4885. /*
  4886. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  4887. * @vdev: Datapath VDEV handle
  4888. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  4889. * @hash_based: pointer to hash value (enabled/disabled) to be populated
  4890. *
  4891. * Use system config values for hash based steering.
  4892. * Return: None
  4893. */
  4894. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  4895. enum cdp_host_reo_dest_ring *reo_dest,
  4896. bool *hash_based)
  4897. {
  4898. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  4899. }
  4900. #endif /* IPA_OFFLOAD */
  4901. /*
  4902. * dp_peer_setup_wifi3() - initialize the peer
  4903. * @soc_hdl: soc handle object
  4904. * @vdev_id : vdev_id of vdev object
  4905. * @peer_mac: Peer's mac address
  4906. *
  4907. * Return: QDF_STATUS
  4908. */
  4909. static QDF_STATUS
  4910. dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  4911. uint8_t *peer_mac)
  4912. {
  4913. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  4914. struct dp_pdev *pdev;
  4915. bool hash_based = 0;
  4916. enum cdp_host_reo_dest_ring reo_dest;
  4917. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4918. struct dp_vdev *vdev =
  4919. dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  4920. struct dp_peer *peer =
  4921. dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
  4922. if (!vdev || !peer || peer->delete_in_progress) {
  4923. status = QDF_STATUS_E_FAILURE;
  4924. goto fail;
  4925. }
  4926. pdev = vdev->pdev;
  4927. dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
  4928. dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
  4929. pdev->pdev_id, vdev->vdev_id,
  4930. vdev->opmode, hash_based, reo_dest);
	/*
	 * There are corner cases where AD1 == AD2 == "VAP's address",
	 * i.e. both devices have the same MAC address. In these cases we
	 * want such packets to be processed by the NULL queue handler,
	 * which is the REO2TCL ring; for this reason we should not set up
	 * reo_queues and the default route for the bss_peer.
	 */
  4938. if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
  4939. status = QDF_STATUS_E_FAILURE;
  4940. goto fail;
  4941. }
  4942. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  4943. /* TODO: Check the destination ring number to be passed to FW */
  4944. soc->cdp_soc.ol_ops->peer_set_default_routing(
  4945. soc->ctrl_psoc,
  4946. peer->vdev->pdev->pdev_id,
  4947. peer->mac_addr.raw,
  4948. peer->vdev->vdev_id, hash_based, reo_dest);
  4949. }
  4950. qdf_atomic_set(&peer->is_default_route_set, 1);
  4951. dp_peer_rx_init(pdev, peer);
  4952. dp_peer_tx_init(pdev, peer);
  4953. dp_peer_ppdu_delayed_ba_init(peer);
  4954. fail:
  4955. if (peer)
  4956. dp_peer_unref_delete(peer);
  4957. return status;
  4958. }
  4959. /*
  4960. * dp_cp_peer_del_resp_handler - Handle the peer delete response
  4961. * @soc_hdl: Datapath SOC handle
  4962. * @vdev_id: id of virtual device object
  4963. * @mac_addr: Mac address of the peer
  4964. *
  4965. * Return: QDF_STATUS
  4966. */
  4967. static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
  4968. uint8_t vdev_id,
  4969. uint8_t *mac_addr)
  4970. {
  4971. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  4972. struct dp_ast_entry *ast_entry = NULL;
  4973. txrx_ast_free_cb cb = NULL;
  4974. void *cookie;
  4975. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  4976. if (!vdev)
  4977. return QDF_STATUS_E_FAILURE;
  4978. qdf_spin_lock_bh(&soc->ast_lock);
  4979. if (soc->ast_override_support)
  4980. ast_entry =
  4981. dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
  4982. vdev->pdev->pdev_id);
  4983. else
  4984. ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
  4985. /* in case of qwrap we have multiple BSS peers
  4986. * with same mac address
  4987. *
  4988. * AST entry for this mac address will be created
  4989. * only for one peer hence it will be NULL here
  4990. */
  4991. if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
  4992. qdf_spin_unlock_bh(&soc->ast_lock);
  4993. return QDF_STATUS_E_FAILURE;
  4994. }
  4995. if (ast_entry->is_mapped)
  4996. soc->ast_table[ast_entry->ast_idx] = NULL;
  4997. DP_STATS_INC(soc, ast.deleted, 1);
  4998. dp_peer_ast_hash_remove(soc, ast_entry);
  4999. cb = ast_entry->callback;
  5000. cookie = ast_entry->cookie;
  5001. ast_entry->callback = NULL;
  5002. ast_entry->cookie = NULL;
  5003. soc->num_ast_entries--;
  5004. qdf_spin_unlock_bh(&soc->ast_lock);
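/* Notify the registered callback of the AST deletion only after
 * ast_lock has been dropped.
 */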
  5005. if (cb) {
  5006. cb(soc->ctrl_psoc,
  5007. dp_soc_to_cdp_soc(soc),
  5008. cookie,
  5009. CDP_TXRX_AST_DELETED);
  5010. }
  5011. qdf_mem_free(ast_entry);
  5012. return QDF_STATUS_SUCCESS;
  5013. }
  5014. /*
  5015. * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
  5016. * @vdev_handle: virtual device object
 * @val: tx encap type of the packets (htt_cmn_pkt_type)
  5018. *
  5019. * Return: void
  5020. */
  5021. static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
  5022. enum htt_cmn_pkt_type val)
  5023. {
  5024. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5025. vdev->tx_encap_type = val;
  5026. }
  5027. /*
  5028. * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
  5029. * @vdev_handle: virtual device object
 * @val: rx decap type of the packets (htt_cmn_pkt_type)
  5031. *
  5032. * Return: void
  5033. */
  5034. static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
  5035. enum htt_cmn_pkt_type val)
  5036. {
  5037. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5038. vdev->rx_decap_type = val;
  5039. }
  5040. /*
  5041. * dp_set_ba_aging_timeout() - set ba aging timeout per AC
  5042. * @txrx_soc: cdp soc handle
  5043. * @ac: Access category
  5044. * @value: timeout value in millisec
  5045. *
  5046. * Return: void
  5047. */
  5048. static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  5049. uint8_t ac, uint32_t value)
  5050. {
  5051. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  5052. hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
  5053. }
  5054. /*
  5055. * dp_get_ba_aging_timeout() - get ba aging timeout per AC
  5056. * @txrx_soc: cdp soc handle
  5057. * @ac: access category
  5058. * @value: timeout value in millisec
  5059. *
  5060. * Return: void
  5061. */
  5062. static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  5063. uint8_t ac, uint32_t *value)
  5064. {
  5065. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  5066. hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
  5067. }
  5068. /*
  5069. * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
  5070. * @pdev_handle: physical device object
  5071. * @val: reo destination ring index (1 - 4)
  5072. *
  5073. * Return: void
  5074. */
  5075. static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
  5076. enum cdp_host_reo_dest_ring val)
  5077. {
  5078. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5079. if (pdev)
  5080. pdev->reo_dest = val;
  5081. }
  5082. /*
  5083. * dp_get_pdev_reo_dest() - get the reo destination for this pdev
  5084. * @pdev_handle: physical device object
  5085. *
  5086. * Return: reo destination ring index
  5087. */
  5088. static enum cdp_host_reo_dest_ring
  5089. dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
  5090. {
  5091. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5092. if (pdev)
  5093. return pdev->reo_dest;
  5094. else
  5095. return cdp_host_reo_dest_ring_unknown;
  5096. }
  5097. /*
  5098. * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
  5099. * @pdev_handle: device object
  5100. * @val: value to be set
  5101. *
 * Return: 0 on success
  5103. */
  5104. static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  5105. uint32_t val)
  5106. {
  5107. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5108. /* Enable/Disable smart mesh filtering. This flag will be checked
  5109. * during rx processing to check if packets are from NAC clients.
  5110. */
  5111. pdev->filter_neighbour_peers = val;
  5112. return 0;
  5113. }
  5114. /*
  5115. * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
  5116. * address for smart mesh filtering
  5117. * @vdev_handle: virtual device object
  5118. * @cmd: Add/Del command
  5119. * @macaddr: nac client mac address
  5120. *
 * Return: 1 if the neighbour peer was added/deleted, 0 on failure
  5122. */
  5123. static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
  5124. uint32_t cmd, uint8_t *macaddr)
  5125. {
  5126. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5127. struct dp_pdev *pdev = vdev->pdev;
  5128. struct dp_neighbour_peer *peer = NULL;
  5129. if (!macaddr)
  5130. goto fail0;
  5131. /* Store address of NAC (neighbour peer) which will be checked
  5132. * against TA of received packets.
  5133. */
  5134. if (cmd == DP_NAC_PARAM_ADD) {
  5135. peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
  5136. sizeof(*peer));
  5137. if (!peer) {
  5138. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5139. FL("DP neighbour peer node memory allocation failed"));
  5140. goto fail0;
  5141. }
  5142. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  5143. macaddr, QDF_MAC_ADDR_SIZE);
  5144. peer->vdev = vdev;
  5145. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  5146. /* add this neighbour peer into the list */
  5147. TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
  5148. neighbour_peer_list_elem);
  5149. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  5150. /* first neighbour */
  5151. if (!pdev->neighbour_peers_added) {
  5152. pdev->neighbour_peers_added = true;
  5153. dp_ppdu_ring_cfg(pdev);
  5154. }
  5155. return 1;
  5156. } else if (cmd == DP_NAC_PARAM_DEL) {
  5157. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  5158. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  5159. neighbour_peer_list_elem) {
  5160. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  5161. macaddr, QDF_MAC_ADDR_SIZE)) {
  5162. /* delete this peer from the list */
  5163. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  5164. peer, neighbour_peer_list_elem);
  5165. qdf_mem_free(peer);
  5166. break;
  5167. }
  5168. }
  5169. /* last neighbour deleted */
  5170. if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
  5171. pdev->neighbour_peers_added = false;
  5172. dp_ppdu_ring_cfg(pdev);
  5173. }
  5174. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  5175. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  5176. !pdev->enhanced_stats_en)
  5177. dp_ppdu_ring_reset(pdev);
  5178. return 1;
  5179. }
  5180. fail0:
  5181. return 0;
  5182. }
  5183. /*
  5184. * dp_get_sec_type() - Get the security type
  5185. * @peer: Datapath peer handle
  5186. * @sec_idx: Security id (mcast, ucast)
  5187. *
 * Return: sec_type - security type of the peer
  5189. */
  5190. static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
  5191. {
  5192. struct dp_peer *dpeer = (struct dp_peer *)peer;
  5193. return dpeer->security[sec_idx].sec_type;
  5194. }
  5195. /*
  5196. * dp_peer_authorize() - authorize txrx peer
  5197. * @peer_handle: Datapath peer handle
 * @authorize: 1 to authorize the peer, 0 to deauthorize
 *
 * Return: None
 */
  5201. static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
  5202. {
  5203. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  5204. struct dp_soc *soc;
  5205. if (peer) {
  5206. soc = peer->vdev->pdev->soc;
  5207. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  5208. peer->authorize = authorize ? 1 : 0;
  5209. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  5210. }
  5211. }
  5212. /*
 * dp_vdev_reset_peer() - Update peer related members in vdev as the
 *                        peer is about to be freed
 * @vdev: datapath vdev handle
 * @peer: datapath peer handle
  5217. *
  5218. * Return: None
  5219. */
  5220. static void dp_vdev_reset_peer(struct dp_vdev *vdev,
  5221. struct dp_peer *peer)
  5222. {
  5223. struct dp_peer *bss_peer = NULL;
if (!vdev) {
dp_err("vdev is NULL");
} else {
if (vdev->vap_bss_peer == peer)
vdev->vap_bss_peer = NULL;
if (vdev->vap_bss_peer) {
bss_peer = vdev->vap_bss_peer;
DP_UPDATE_STATS(vdev, peer);
}
}
  5234. }
  5235. /*
  5236. * dp_peer_release_mem() - free dp peer handle memory
 * @soc: datapath soc handle
  5238. * @pdev: datapath pdev handle
  5239. * @peer: datapath peer handle
  5240. * @vdev_opmode: Vdev operation mode
  5241. * @vdev_mac_addr: Vdev Mac address
  5242. *
  5243. * Return: None
  5244. */
  5245. static void dp_peer_release_mem(struct dp_soc *soc,
  5246. struct dp_pdev *pdev,
  5247. struct dp_peer *peer,
  5248. enum wlan_op_mode vdev_opmode,
  5249. uint8_t *vdev_mac_addr)
  5250. {
  5251. if (soc->cdp_soc.ol_ops->peer_unref_delete)
  5252. soc->cdp_soc.ol_ops->peer_unref_delete(
  5253. soc->ctrl_psoc,
  5254. pdev->pdev_id,
  5255. peer->mac_addr.raw, vdev_mac_addr,
  5256. vdev_opmode);
  5257. /*
 * Peer AST list has to be empty here
  5259. */
  5260. DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
  5261. qdf_mem_free(peer);
  5262. }
  5263. /**
  5264. * dp_delete_pending_vdev() - check and process vdev delete
  5265. * @pdev: DP specific pdev pointer
  5266. * @vdev: DP specific vdev pointer
  5267. * @vdev_id: vdev id corresponding to vdev
  5268. *
  5269. * This API does following:
  5270. * 1) It releases tx flow pools buffers as vdev is
  5271. * going down and no peers are associated.
  5272. * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
  5273. */
  5274. static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
  5275. uint8_t vdev_id)
  5276. {
  5277. ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
  5278. void *vdev_delete_context = NULL;
  5279. vdev_delete_cb = vdev->delete.callback;
  5280. vdev_delete_context = vdev->delete.context;
  5281. dp_info("deleting vdev object %pK (%pM)- its last peer is done",
  5282. vdev, vdev->mac_addr.raw);
  5283. /* all peers are gone, go ahead and delete it */
  5284. dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
  5285. FLOW_TYPE_VDEV, vdev_id);
  5286. dp_tx_vdev_detach(vdev);
  5287. pdev->soc->vdev_id_map[vdev_id] = NULL;
  5288. if (wlan_op_mode_monitor == vdev->opmode) {
  5289. pdev->monitor_vdev = NULL;
  5290. } else {
  5291. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  5292. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  5293. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  5294. }
  5295. dp_info("deleting vdev object %pK (%pM)",
  5296. vdev, vdev->mac_addr.raw);
  5297. qdf_mem_free(vdev);
  5298. vdev = NULL;
  5299. if (vdev_delete_cb)
  5300. vdev_delete_cb(vdev_delete_context);
  5301. }
  5302. /*
  5303. * dp_peer_unref_delete() - unref and delete peer
  5304. * @peer_handle: Datapath peer handle
  5305. *
  5306. */
  5307. void dp_peer_unref_delete(struct dp_peer *peer)
  5308. {
  5309. struct dp_vdev *vdev = peer->vdev;
  5310. struct dp_pdev *pdev = vdev->pdev;
  5311. struct dp_soc *soc = pdev->soc;
  5312. struct dp_peer *tmppeer;
  5313. int found = 0;
  5314. uint16_t peer_id;
  5315. uint16_t vdev_id;
  5316. bool vdev_delete = false;
  5317. struct cdp_peer_cookie peer_cookie;
  5318. enum wlan_op_mode vdev_opmode;
  5319. uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
  5320. /*
  5321. * Hold the lock all the way from checking if the peer ref count
  5322. * is zero until the peer references are removed from the hash
  5323. * table and vdev list (if the peer ref count is zero).
  5324. * This protects against a new HL tx operation starting to use the
  5325. * peer object just after this function concludes it's done being used.
  5326. * Furthermore, the lock needs to be held while checking whether the
  5327. * vdev's list of peers is empty, to make sure that list is not modified
  5328. * concurrently with the empty check.
  5329. */
  5330. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  5331. if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
  5332. peer_id = peer->peer_ids[0];
  5333. vdev_id = vdev->vdev_id;
  5334. /*
  5335. * Make sure that the reference to the peer in
  5336. * peer object map is removed
  5337. */
  5338. if (peer_id != HTT_INVALID_PEER)
  5339. soc->peer_id_to_obj_map[peer_id] = NULL;
  5340. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  5341. "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
  5342. /* remove the reference to the peer from the hash table */
  5343. dp_peer_find_hash_remove(soc, peer);
  5344. qdf_spin_lock_bh(&soc->ast_lock);
  5345. if (peer->self_ast_entry) {
  5346. dp_peer_del_ast(soc, peer->self_ast_entry);
  5347. peer->self_ast_entry = NULL;
  5348. }
  5349. qdf_spin_unlock_bh(&soc->ast_lock);
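/* Remove the peer from its vdev's peer list, if present */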
  5350. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  5351. if (tmppeer == peer) {
  5352. found = 1;
  5353. break;
  5354. }
  5355. }
  5356. if (found) {
  5357. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  5358. peer_list_elem);
  5359. } else {
  5360. /*Ignoring the remove operation as peer not found*/
  5361. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  5362. "peer:%pK not found in vdev:%pK peerlist:%pK",
  5363. peer, vdev, &peer->vdev->peer_list);
  5364. }
  5365. /* send peer destroy event to upper layer */
  5366. qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
  5367. QDF_MAC_ADDR_SIZE);
  5368. peer_cookie.ctx = NULL;
  5369. peer_cookie.ctx = (struct cdp_stats_cookie *)
  5370. peer->wlanstats_ctx;
  5371. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5372. dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
  5373. pdev->soc,
  5374. (void *)&peer_cookie,
  5375. peer->peer_ids[0],
  5376. WDI_NO_VAL,
  5377. pdev->pdev_id);
  5378. #endif
  5379. peer->wlanstats_ctx = NULL;
  5380. /* cleanup the peer data */
  5381. dp_peer_cleanup(vdev, peer, false);
  5382. /* reset this peer related info in vdev */
  5383. dp_vdev_reset_peer(vdev, peer);
  5384. /* save vdev related member in case vdev freed */
  5385. vdev_opmode = vdev->opmode;
  5386. qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
  5387. QDF_MAC_ADDR_SIZE);
  5388. /*
 * check whether the parent vdev is pending deletion
 * and has no peers left.
  5391. */
  5392. if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list))
  5393. vdev_delete = true;
  5394. /*
  5395. * Now that there are no references to the peer, we can
  5396. * release the peer reference lock.
  5397. */
  5398. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  5399. /*
 * Invoke soc.ol_ops->peer_unref_delete outside of
 * peer_ref_mutex to avoid a potential deadlock.
  5402. */
  5403. dp_peer_release_mem(soc, pdev, peer,
  5404. vdev_opmode,
  5405. vdev_mac_addr);
  5406. /*
 * Delete the vdev now if it was waiting for all its
 * peers to be deleted and none are left.
  5409. */
  5410. if (vdev_delete)
  5411. dp_delete_pending_vdev(pdev, vdev, vdev_id);
  5412. } else {
  5413. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  5414. }
  5415. }
  5416. #ifdef PEER_CACHE_RX_PKTS
  5417. static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
  5418. {
  5419. dp_rx_flush_rx_cached(peer, true);
  5420. qdf_list_destroy(&peer->bufq_info.cached_bufq);
  5421. qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
  5422. }
  5423. #else
  5424. static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
  5425. {
  5426. }
  5427. #endif
  5428. /*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @soc: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Return: QDF_STATUS
 */
  5436. static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
  5437. uint8_t *peer_mac, uint32_t bitmap)
  5438. {
  5439. struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
  5440. peer_mac, 0, vdev_id);
  5441. /* Peer can be null for monitor vap mac address */
  5442. if (!peer) {
  5443. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  5444. "%s: Invalid peer\n", __func__);
  5445. return QDF_STATUS_E_FAILURE;
  5446. }
  5447. peer->valid = 0;
  5448. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  5449. FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);
  5450. dp_local_peer_id_free(peer->vdev->pdev, peer);
  5451. dp_peer_rx_bufq_resources_deinit(peer);
  5452. qdf_spinlock_destroy(&peer->peer_info_lock);
  5453. dp_peer_multipass_list_remove(peer);
  5454. if (wlan_op_mode_sta == peer->vdev->opmode &&
  5455. qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
  5456. QDF_MAC_ADDR_SIZE) != 0) {
  5457. dp_set_ignore_reo_status_cb(peer->vdev->pdev->soc, true);
  5458. }
  5459. /*
  5460. * Remove the reference added during peer_attach.
  5461. * The peer will still be left allocated until the
  5462. * PEER_UNMAP message arrives to remove the other
  5463. * reference, added by the PEER_MAP message.
  5464. */
  5465. dp_peer_unref_delete(peer);
  5466. /*
  5467. * Remove the reference taken above
  5468. */
  5469. dp_peer_unref_delete(peer);
  5470. return QDF_STATUS_SUCCESS;
  5471. }
  5472. /*
 * dp_get_vdev_mac_addr_wifi3() - Get the MAC address of a vdev
  5474. * @soc_hdl: Datapath soc handle
  5475. * @vdev_id: virtual interface id
  5476. *
  5477. * Return: MAC address on success, NULL on failure.
  5478. *
  5479. */
static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
  5481. uint8_t vdev_id)
  5482. {
  5483. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  5484. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  5485. if (!vdev)
  5486. return NULL;
  5487. return vdev->mac_addr.raw;
  5488. }
  5489. /*
 * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
 * @soc: DP soc handle
 * @vdev_id: id of DP VDEV handle
 * @val: 1 to enable WDS, 0 to disable
 *
 * Return: QDF_STATUS
  5496. */
  5497. static int dp_vdev_set_wds(struct cdp_soc_t *soc, uint8_t vdev_id, uint32_t val)
  5498. {
  5499. struct dp_vdev *vdev =
  5500. dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
  5501. vdev_id);
  5502. if (!vdev)
  5503. return QDF_STATUS_E_FAILURE;
  5504. vdev->wds_enabled = val;
  5505. return QDF_STATUS_SUCCESS;
  5506. }
  5507. /*
  5508. * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
  5509. * @soc_hdl: datapath soc handle
  5510. * @pdev_id: physical device instance id
  5511. *
  5512. * Return: virtual interface id
  5513. */
  5514. static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
  5515. uint8_t pdev_id)
  5516. {
  5517. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  5518. struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
if (qdf_unlikely(!pdev || !pdev->monitor_vdev))
return -EINVAL;
  5521. return pdev->monitor_vdev->vdev_id;
  5522. }
  5523. static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
  5524. {
  5525. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  5526. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  5527. if (!vdev) {
  5528. dp_err("vdev for id %d is NULL", vdev_id);
  5529. return -EINVAL;
  5530. }
  5531. return vdev->opmode;
  5532. }
  5533. /**
  5534. * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
  5535. * @soc_hdl: ol_txrx_soc_handle handle
  5536. * @vdev_id: vdev id for which os rx handles are needed
  5537. * @stack_fn_p: pointer to stack function pointer
 * @osif_vdev_p: pointer to ol_osif_vdev_handle to be populated
  5539. *
  5540. * Return: void
  5541. */
  5542. static
  5543. void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
  5544. uint8_t vdev_id,
  5545. ol_txrx_rx_fp *stack_fn_p,
  5546. ol_osif_vdev_handle *osif_vdev_p)
  5547. {
  5548. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  5549. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  5550. if (!vdev)
  5551. return;
  5552. *stack_fn_p = vdev->osif_rx_stack;
  5553. *osif_vdev_p = vdev->osif_vdev;
  5554. }
  5555. /**
 * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of vdev
  5557. * @soc_hdl: datapath soc handle
  5558. * @vdev_id: virtual device/interface id
  5559. *
  5560. * Return: Handle to control pdev
  5561. */
  5562. static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
  5563. struct cdp_soc_t *soc_hdl,
  5564. uint8_t vdev_id)
  5565. {
  5566. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  5567. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  5568. struct dp_pdev *pdev;
  5569. if (!vdev || !vdev->pdev)
  5570. return NULL;
  5571. pdev = vdev->pdev;
  5572. return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
  5573. }
  5574. /**
  5575. * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
  5576. * ring based on target
  5577. * @soc: soc handle
  5578. * @mac_for_pdev: pdev_id
  5579. * @pdev: physical device handle
  5580. * @ring_num: mac id
  5581. * @htt_tlv_filter: tlv filter
  5582. *
  5583. * Return: zero on success, non-zero on failure
  5584. */
  5585. static inline
  5586. QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
  5587. struct dp_pdev *pdev, uint8_t ring_num,
  5588. struct htt_rx_ring_tlv_filter htt_tlv_filter)
  5589. {
  5590. QDF_STATUS status;
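/* Targets with a dedicated RXDMA monitor buffer ring (rxdma1) are
 * programmed through RXDMA_MONITOR_BUF; others reuse the per-MAC
 * RX buffer ring (RXDMA_BUF).
 */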
  5591. if (soc->wlan_cfg_ctx->rxdma1_enable)
  5592. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5593. pdev->rxdma_mon_buf_ring[ring_num]
  5594. .hal_srng,
  5595. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
  5596. &htt_tlv_filter);
  5597. else
  5598. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5599. pdev->rx_mac_buf_ring[ring_num]
  5600. .hal_srng,
  5601. RXDMA_BUF, RX_BUFFER_SIZE,
  5602. &htt_tlv_filter);
  5603. return status;
  5604. }
  5605. static inline void
  5606. dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
  5607. {
  5608. pdev->mcopy_mode = 0;
  5609. qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
  5610. }
  5611. /**
  5612. * dp_reset_monitor_mode() - Disable monitor mode
  5613. * @pdev_handle: Datapath PDEV handle
  5614. *
  5615. * Return: QDF_STATUS
  5616. */
  5617. QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
  5618. {
  5619. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5620. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  5621. struct dp_soc *soc = pdev->soc;
  5622. uint8_t pdev_id;
  5623. int mac_id;
  5624. QDF_STATUS status = QDF_STATUS_SUCCESS;
  5625. pdev_id = pdev->pdev_id;
  5626. soc = pdev->soc;
  5627. qdf_spin_lock_bh(&pdev->mon_lock);
  5628. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
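/* Sending an all-zero TLV filter disables delivery on the monitor rings */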
  5629. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5630. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  5631. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  5632. pdev, mac_id,
  5633. htt_tlv_filter);
  5634. if (status != QDF_STATUS_SUCCESS) {
  5635. dp_err("Failed to send tlv filter for monitor mode rings");
  5636. qdf_spin_unlock_bh(&pdev->mon_lock);
  5637. return status;
  5638. }
  5639. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5640. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  5641. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
  5642. &htt_tlv_filter);
  5643. }
  5644. pdev->monitor_vdev = NULL;
  5645. if (pdev->mcopy_mode)
  5646. dp_pdev_disable_mcopy_code(pdev);
  5647. pdev->monitor_configured = false;
  5648. qdf_spin_unlock_bh(&pdev->mon_lock);
  5649. return QDF_STATUS_SUCCESS;
  5650. }
  5651. /**
  5652. * dp_set_nac() - set peer_nac
  5653. * @soc: soc handle
  5654. * @vdev_id: id of dp handle
  5655. * @peer_mac: mac of datapath PEER handle
  5656. *
  5657. * Return: void
  5658. */
  5659. static void dp_set_nac(struct cdp_soc_t *soc, uint8_t vdev_id,
  5660. uint8_t *peer_mac)
  5661. {
  5662. struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
  5663. peer_mac, 0, vdev_id);
  5664. if (!peer || peer->delete_in_progress)
  5665. goto fail;
  5666. peer->nac = 1;
  5667. fail:
  5668. if (peer)
  5669. dp_peer_unref_delete(peer);
  5670. return;
  5671. }
  5672. /**
  5673. * dp_get_tx_pending() - read pending tx
  5674. * @pdev_handle: Datapath PDEV handle
  5675. *
  5676. * Return: outstanding tx
  5677. */
  5678. static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
  5679. {
  5680. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5681. return qdf_atomic_read(&pdev->num_tx_outstanding);
  5682. }
  5683. /**
  5684. * dp_get_peer_mac_from_peer_id() - get peer mac
 * @soc: CDP SoC handle
  5686. * @peer_id: Peer ID
  5687. * @peer_mac: MAC addr of PEER
  5688. *
  5689. * Return: QDF_STATUS
  5690. */
  5691. static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
  5692. uint32_t peer_id,
  5693. uint8_t *peer_mac)
  5694. {
  5695. struct dp_peer *peer;
  5696. if (soc && peer_mac) {
  5697. peer = dp_peer_find_by_id((struct dp_soc *)soc,
  5698. (uint16_t)peer_id);
  5699. if (peer) {
  5700. qdf_mem_copy(peer_mac, peer->mac_addr.raw,
  5701. QDF_MAC_ADDR_SIZE);
  5702. dp_peer_unref_del_find_by_id(peer);
  5703. return QDF_STATUS_SUCCESS;
  5704. }
  5705. }
  5706. return QDF_STATUS_E_FAILURE;
  5707. }
  5708. /**
  5709. * dp_pdev_configure_monitor_rings() - configure monitor rings
 * @pdev: Datapath PDEV handle
  5711. *
  5712. * Return: QDF_STATUS
  5713. */
  5714. QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
  5715. {
  5716. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  5717. struct dp_soc *soc;
  5718. uint8_t pdev_id;
  5719. int mac_id;
  5720. QDF_STATUS status = QDF_STATUS_SUCCESS;
  5721. pdev_id = pdev->pdev_id;
  5722. soc = pdev->soc;
  5723. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  5724. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  5725. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  5726. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  5727. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  5728. pdev->mo_data_filter);
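/* First pass: program the monitor buffer ring filter to deliver
 * per-MPDU/MSDU TLVs along with the packet payload.
 */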
  5729. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  5730. htt_tlv_filter.mpdu_start = 1;
  5731. htt_tlv_filter.msdu_start = 1;
  5732. htt_tlv_filter.packet = 1;
  5733. htt_tlv_filter.msdu_end = 1;
  5734. htt_tlv_filter.mpdu_end = 1;
  5735. htt_tlv_filter.packet_header = 1;
  5736. htt_tlv_filter.attention = 1;
  5737. htt_tlv_filter.ppdu_start = 0;
  5738. htt_tlv_filter.ppdu_end = 0;
  5739. htt_tlv_filter.ppdu_end_user_stats = 0;
  5740. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  5741. htt_tlv_filter.ppdu_end_status_done = 0;
  5742. htt_tlv_filter.header_per_msdu = 1;
  5743. htt_tlv_filter.enable_fp =
  5744. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  5745. htt_tlv_filter.enable_md = 0;
  5746. htt_tlv_filter.enable_mo =
  5747. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  5748. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  5749. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  5750. if (pdev->mcopy_mode) {
  5751. htt_tlv_filter.fp_data_filter = 0;
  5752. htt_tlv_filter.mo_data_filter = 0;
  5753. } else {
  5754. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  5755. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  5756. }
  5757. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  5758. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  5759. htt_tlv_filter.offset_valid = false;
  5760. if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
  5761. (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
  5762. htt_tlv_filter.fp_mgmt_filter = 0;
  5763. htt_tlv_filter.fp_ctrl_filter = 0;
  5764. htt_tlv_filter.fp_data_filter = 0;
  5765. htt_tlv_filter.mo_mgmt_filter = 0;
  5766. htt_tlv_filter.mo_ctrl_filter = 0;
  5767. htt_tlv_filter.mo_data_filter = 0;
  5768. }
  5769. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5770. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  5771. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  5772. pdev, mac_id,
  5773. htt_tlv_filter);
  5774. if (status != QDF_STATUS_SUCCESS) {
  5775. dp_err("Failed to send tlv filter for monitor mode rings");
  5776. return status;
  5777. }
  5778. }
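/* Second pass: program the monitor status ring filter for
 * PPDU-level TLVs only.
 */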
  5779. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  5780. htt_tlv_filter.mpdu_start = 1;
  5781. htt_tlv_filter.msdu_start = 0;
  5782. htt_tlv_filter.packet = 0;
  5783. htt_tlv_filter.msdu_end = 0;
  5784. htt_tlv_filter.mpdu_end = 0;
  5785. if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
  5786. (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
  5787. htt_tlv_filter.mpdu_end = 1;
  5788. }
  5789. htt_tlv_filter.attention = 0;
  5790. htt_tlv_filter.ppdu_start = 1;
  5791. htt_tlv_filter.ppdu_end = 1;
  5792. htt_tlv_filter.ppdu_end_user_stats = 1;
  5793. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  5794. htt_tlv_filter.ppdu_end_status_done = 1;
  5795. htt_tlv_filter.enable_fp = 1;
  5796. htt_tlv_filter.enable_md = 0;
  5797. htt_tlv_filter.enable_mo = 1;
  5798. if (pdev->mcopy_mode ||
  5799. (pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
  5800. htt_tlv_filter.packet_header = 1;
  5801. if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) {
  5802. htt_tlv_filter.header_per_msdu = 0;
  5803. htt_tlv_filter.enable_mo = 0;
  5804. } else if (pdev->rx_enh_capture_mode ==
  5805. CDP_RX_ENH_CAPTURE_MPDU_MSDU) {
  5806. bool is_rx_mon_proto_flow_tag_enabled =
  5807. wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(
  5808. soc->wlan_cfg_ctx);
  5809. htt_tlv_filter.header_per_msdu = 1;
  5810. htt_tlv_filter.enable_mo = 0;
  5811. if (pdev->is_rx_enh_capture_trailer_enabled ||
  5812. is_rx_mon_proto_flow_tag_enabled)
  5813. htt_tlv_filter.msdu_end = 1;
  5814. }
  5815. }
  5816. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  5817. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  5818. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  5819. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  5820. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  5821. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  5822. htt_tlv_filter.offset_valid = false;
  5823. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5824. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  5825. pdev->pdev_id);
  5826. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5827. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  5828. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  5829. }
  5830. return status;
  5831. }
  5832. /**
  5833. * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @soc: Datapath SOC handle
 * @vdev_id: id of the Datapath VDEV
 * @special_monitor: Flag to denote if it is a smart/special monitor mode
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
  5838. */
  5839. static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc,
  5840. uint8_t vdev_id,
  5841. uint8_t special_monitor)
  5842. {
  5843. struct dp_pdev *pdev;
  5844. struct dp_vdev *vdev =
  5845. dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
  5846. vdev_id);
  5847. if (!vdev)
  5848. return QDF_STATUS_E_FAILURE;
  5849. pdev = vdev->pdev;
  5850. pdev->monitor_vdev = vdev;
  5851. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  5852. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
  5853. pdev, pdev->pdev_id, pdev->soc, vdev);
  5854. /*
 * Do not configure the monitor buf ring and filter for smart and
 * lite monitor modes: for smart monitor, filters are added along
 * with the first NAC; for lite monitor, the required configuration
 * is done through dp_set_pdev_param.
  5860. */
  5861. if (special_monitor)
  5862. return QDF_STATUS_SUCCESS;
  5863. /*Check if current pdev's monitor_vdev exists */
  5864. if (pdev->monitor_configured) {
  5865. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  5866. "monitor vap already created vdev=%pK\n", vdev);
  5867. return QDF_STATUS_E_RESOURCES;
  5868. }
  5869. pdev->monitor_configured = true;
  5870. dp_mon_buf_delayed_replenish(pdev);
  5871. return dp_pdev_configure_monitor_rings(pdev);
  5872. }
  5873. /**
  5874. * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
  5875. * @pdev_handle: Datapath PDEV handle
  5876. * @filter_val: Flag to select Filter for monitor mode
  5877. * Return: 0 on success, not 0 on failure
  5878. */
  5879. static QDF_STATUS
  5880. dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
  5881. struct cdp_monitor_filter *filter_val)
  5882. {
/* Many monitor VAPs can exist in a system, but only one can be up at
 * any time
  5885. */
  5886. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5887. struct dp_vdev *vdev = pdev->monitor_vdev;
  5888. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  5889. struct dp_soc *soc;
  5890. uint8_t pdev_id;
  5891. int mac_id;
  5892. QDF_STATUS status = QDF_STATUS_SUCCESS;
  5893. pdev_id = pdev->pdev_id;
  5894. soc = pdev->soc;
  5895. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  5896. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
  5897. pdev, pdev_id, soc, vdev);
  5898. /*Check if current pdev's monitor_vdev exists */
  5899. if (!pdev->monitor_vdev) {
  5900. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  5901. "vdev=%pK", vdev);
  5902. qdf_assert(vdev);
  5903. }
  5904. /* update filter mode, type in pdev structure */
  5905. pdev->mon_filter_mode = filter_val->mode;
  5906. pdev->fp_mgmt_filter = filter_val->fp_mgmt;
  5907. pdev->fp_ctrl_filter = filter_val->fp_ctrl;
  5908. pdev->fp_data_filter = filter_val->fp_data;
  5909. pdev->mo_mgmt_filter = filter_val->mo_mgmt;
  5910. pdev->mo_ctrl_filter = filter_val->mo_ctrl;
  5911. pdev->mo_data_filter = filter_val->mo_data;
  5912. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  5913. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
  5914. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  5915. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  5916. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  5917. pdev->mo_data_filter);
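/* First flush the rings with an all-zero filter before applying
 * the new filter configuration below.
 */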
  5918. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  5919. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5920. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  5921. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  5922. pdev, mac_id,
  5923. htt_tlv_filter);
  5924. if (status != QDF_STATUS_SUCCESS) {
  5925. dp_err("Failed to send tlv filter for monitor mode rings");
  5926. return status;
  5927. }
  5928. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5929. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  5930. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  5931. }
  5932. htt_tlv_filter.mpdu_start = 1;
  5933. htt_tlv_filter.msdu_start = 1;
  5934. htt_tlv_filter.packet = 1;
  5935. htt_tlv_filter.msdu_end = 1;
  5936. htt_tlv_filter.mpdu_end = 1;
  5937. htt_tlv_filter.packet_header = 1;
  5938. htt_tlv_filter.attention = 1;
  5939. htt_tlv_filter.ppdu_start = 0;
  5940. htt_tlv_filter.ppdu_end = 0;
  5941. htt_tlv_filter.ppdu_end_user_stats = 0;
  5942. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  5943. htt_tlv_filter.ppdu_end_status_done = 0;
  5944. htt_tlv_filter.header_per_msdu = 1;
  5945. htt_tlv_filter.enable_fp =
  5946. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  5947. htt_tlv_filter.enable_md = 0;
  5948. htt_tlv_filter.enable_mo =
  5949. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  5950. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  5951. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  5952. if (pdev->mcopy_mode)
  5953. htt_tlv_filter.fp_data_filter = 0;
  5954. else
  5955. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  5956. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  5957. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  5958. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  5959. htt_tlv_filter.offset_valid = false;
  5960. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5961. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
  5962. status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
  5963. pdev, mac_id,
  5964. htt_tlv_filter);
  5965. if (status != QDF_STATUS_SUCCESS) {
  5966. dp_err("Failed to send tlv filter for monitor mode rings");
  5967. return status;
  5968. }
  5969. }
  5970. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  5971. htt_tlv_filter.mpdu_start = 1;
  5972. htt_tlv_filter.msdu_start = 0;
  5973. htt_tlv_filter.packet = 0;
  5974. htt_tlv_filter.msdu_end = 0;
  5975. htt_tlv_filter.mpdu_end = 0;
  5976. htt_tlv_filter.attention = 0;
  5977. htt_tlv_filter.ppdu_start = 1;
  5978. htt_tlv_filter.ppdu_end = 1;
  5979. htt_tlv_filter.ppdu_end_user_stats = 1;
  5980. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  5981. htt_tlv_filter.ppdu_end_status_done = 1;
  5982. htt_tlv_filter.enable_fp = 1;
  5983. htt_tlv_filter.enable_md = 0;
  5984. htt_tlv_filter.enable_mo = 1;
  5985. if (pdev->mcopy_mode) {
  5986. htt_tlv_filter.packet_header = 1;
  5987. }
  5988. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  5989. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  5990. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  5991. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  5992. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  5993. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  5994. htt_tlv_filter.offset_valid = false;
  5995. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  5996. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  5997. pdev->pdev_id);
  5998. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  5999. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6000. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6001. }
  6002. return QDF_STATUS_SUCCESS;
  6003. }
  6004. /**
  6005. * dp_pdev_set_monitor_channel() - set monitor channel num in pdev
 * @pdev_handle: Datapath PDEV handle
 * @chan_num: channel number
 *
  6008. * Return: None
  6009. */
  6010. static
  6011. void dp_pdev_set_monitor_channel(struct cdp_pdev *pdev_handle, int chan_num)
  6012. {
  6013. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6014. pdev->mon_chan_num = chan_num;
  6015. }
  6016. /**
  6017. * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
  6018. * @pdev_handle: Datapath PDEV handle
  6019. * @nbuf: Management frame buffer
  6020. */
  6021. static void
  6022. dp_deliver_tx_mgmt(struct cdp_pdev *pdev_handle, qdf_nbuf_t nbuf)
  6023. {
  6024. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6025. dp_deliver_mgmt_frm(pdev, nbuf);
  6026. }
  6027. /**
 * dp_mon_set_bsscolor() - sets bsscolor for tx capture
  6029. * @pdev_handle: Datapath PDEV handle
  6030. * @bsscolor: new bsscolor
  6031. */
  6032. static void
  6033. dp_mon_set_bsscolor(struct cdp_pdev *pdev_handle, uint8_t bsscolor)
  6034. {
  6035. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6036. pdev->rx_mon_recv_status.bsscolor = bsscolor;
  6037. }
  6038. /**
  6039. * dp_get_pdev_id_frm_pdev() - get pdev_id
  6040. * @pdev_handle: Datapath PDEV handle
  6041. *
  6042. * Return: pdev_id
  6043. */
  6044. static
  6045. uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
  6046. {
  6047. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6048. return pdev->pdev_id;
  6049. }
  6050. /**
  6051. * dp_get_delay_stats_flag() - get delay stats flag
  6052. * @pdev_handle: Datapath PDEV handle
  6053. *
  6054. * Return: 0 if flag is disabled else 1
  6055. */
  6056. static
  6057. bool dp_get_delay_stats_flag(struct cdp_pdev *pdev_handle)
  6058. {
  6059. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6060. return pdev->delay_stats_flag;
  6061. }
  6062. /**
  6063. * dp_pdev_set_chan_noise_floor() - set channel noise floor
  6064. * @pdev_handle: Datapath PDEV handle
  6065. * @chan_noise_floor: Channel Noise Floor
  6066. *
  6067. * Return: void
  6068. */
  6069. static
  6070. void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
  6071. int16_t chan_noise_floor)
  6072. {
  6073. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6074. pdev->chan_noise_floor = chan_noise_floor;
  6075. }
  6076. /**
  6077. * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
  6078. * @vdev_handle: Datapath VDEV handle
  6079. * Return: true on ucast filter flag set
  6080. */
  6081. static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
  6082. {
  6083. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6084. struct dp_pdev *pdev;
  6085. pdev = vdev->pdev;
  6086. if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
  6087. (pdev->mo_data_filter & FILTER_DATA_UCAST))
  6088. return true;
  6089. return false;
  6090. }
  6091. /**
  6092. * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
  6093. * @vdev_handle: Datapath VDEV handle
  6094. * Return: true on mcast filter flag set
  6095. */
  6096. static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
  6097. {
  6098. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6099. struct dp_pdev *pdev;
  6100. pdev = vdev->pdev;
  6101. if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
  6102. (pdev->mo_data_filter & FILTER_DATA_MCAST))
  6103. return true;
  6104. return false;
  6105. }
  6106. /**
  6107. * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
  6108. * @vdev_handle: Datapath VDEV handle
  6109. * Return: true on non data filter flag set
  6110. */
  6111. static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
  6112. {
  6113. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6114. struct dp_pdev *pdev;
  6115. pdev = vdev->pdev;
  6116. if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
  6117. (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
  6118. if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
  6119. (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
  6120. return true;
  6121. }
  6122. }
  6123. return false;
  6124. }
  6125. #ifdef MESH_MODE_SUPPORT
  6126. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  6127. {
  6128. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  6129. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  6130. FL("val %d"), val);
  6131. vdev->mesh_vdev = val;
  6132. }
  6133. /*
  6134. * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
  6135. * @vdev_hdl: virtual device object
  6136. * @val: value to be set
  6137. *
  6138. * Return: void
  6139. */
  6140. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  6141. {
  6142. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  6143. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  6144. FL("val %d"), val);
  6145. vdev->mesh_rx_filter = val;
  6146. }
  6147. #endif
  6148. bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
  6149. {
  6150. uint8_t pdev_count;
  6151. for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
  6152. if (soc->pdev_list[pdev_count] &&
  6153. soc->pdev_list[pdev_count] == data)
  6154. return true;
  6155. }
  6156. return false;
  6157. }
  6158. /**
  6159. * dp_rx_bar_stats_cb(): BAR received stats callback
  6160. * @soc: SOC handle
  6161. * @cb_ctxt: Call back context
  6162. * @reo_status: Reo status
  6163. *
  6164. * return: void
  6165. */
  6166. void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  6167. union hal_reo_status *reo_status)
  6168. {
  6169. struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
  6170. struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
  6171. if (!dp_check_pdev_exists(soc, pdev)) {
  6172. dp_err_rl("pdev doesn't exist");
  6173. return;
  6174. }
  6175. if (!qdf_atomic_read(&soc->cmn_init_done))
  6176. return;
  6177. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
  6178. DP_PRINT_STATS("REO stats failure %d",
  6179. queue_status->header.status);
  6180. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  6181. return;
  6182. }
  6183. pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
  6184. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  6185. }
  6186. /**
  6187. * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
  6188. * @vdev: DP VDEV handle
  6189. *
  6190. * return: void
  6191. */
  6192. void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
  6193. struct cdp_vdev_stats *vdev_stats)
  6194. {
  6195. struct dp_peer *peer = NULL;
  6196. struct dp_soc *soc = NULL;
  6197. if (!vdev || !vdev->pdev)
  6198. return;
  6199. soc = vdev->pdev->soc;
  6200. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  6201. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
  6202. dp_update_vdev_stats(vdev_stats, peer);
  6203. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  6204. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  6205. vdev_stats, vdev->vdev_id,
  6206. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  6207. #endif
  6208. }
  6209. void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
  6210. {
  6211. struct dp_vdev *vdev = NULL;
  6212. struct dp_soc *soc;
  6213. struct cdp_vdev_stats *vdev_stats =
  6214. qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  6215. if (!vdev_stats) {
  6216. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6217. "DP alloc failure - unable to get alloc vdev stats");
  6218. return;
  6219. }
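/* vdev_stats is a scratch buffer reused for each vdev in the loop below */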
  6220. qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
  6221. qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
  6222. qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
  6223. if (pdev->mcopy_mode)
  6224. DP_UPDATE_STATS(pdev, pdev->invalid_peer);
  6225. soc = pdev->soc;
  6226. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  6227. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  6228. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  6229. dp_aggregate_vdev_stats(vdev, vdev_stats);
  6230. dp_update_pdev_stats(pdev, vdev_stats);
  6231. dp_update_pdev_ingress_stats(pdev, vdev);
  6232. }
  6233. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  6234. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  6235. qdf_mem_free(vdev_stats);
  6236. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  6237. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
  6238. pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
  6239. #endif
  6240. }
  6241. /**
  6242. * dp_vdev_getstats() - get vdev packet level stats
  6243. * @vdev_handle: Datapath VDEV handle
  6244. * @stats: cdp network device stats structure
  6245. *
  6246. * Return: QDF_STATUS
  6247. */
  6248. static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
  6249. struct cdp_dev_stats *stats)
  6250. {
  6251. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6252. struct dp_pdev *pdev;
  6253. struct dp_soc *soc;
  6254. struct cdp_vdev_stats *vdev_stats;
  6255. if (!vdev)
  6256. return QDF_STATUS_E_FAILURE;
  6257. pdev = vdev->pdev;
  6258. if (!pdev)
  6259. return QDF_STATUS_E_FAILURE;
  6260. soc = pdev->soc;
  6261. vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  6262. if (!vdev_stats) {
  6263. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6264. "DP alloc failure - unable to get alloc vdev stats");
  6265. return QDF_STATUS_E_FAILURE;
  6266. }
  6267. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  6268. dp_aggregate_vdev_stats(vdev, vdev_stats);
  6269. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
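/* Translate the aggregated vdev stats into the generic cdp_dev_stats format */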
  6270. stats->tx_packets = vdev_stats->tx_i.rcvd.num;
  6271. stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
  6272. stats->tx_errors = vdev_stats->tx.tx_failed +
  6273. vdev_stats->tx_i.dropped.dropped_pkt.num;
  6274. stats->tx_dropped = stats->tx_errors;
  6275. stats->rx_packets = vdev_stats->rx.unicast.num +
  6276. vdev_stats->rx.multicast.num +
  6277. vdev_stats->rx.bcast.num;
  6278. stats->rx_bytes = vdev_stats->rx.unicast.bytes +
  6279. vdev_stats->rx.multicast.bytes +
  6280. vdev_stats->rx.bcast.bytes;
  6281. qdf_mem_free(vdev_stats);
  6282. return QDF_STATUS_SUCCESS;
  6283. }
  6284. /**
  6285. * dp_pdev_getstats() - get pdev packet level stats
  6286. * @pdev_handle: Datapath PDEV handle
  6287. * @stats: cdp network device stats structure
  6288. *
 * Return: None
  6290. */
  6291. static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
  6292. struct cdp_dev_stats *stats)
  6293. {
  6294. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6295. dp_aggregate_pdev_stats(pdev);
  6296. stats->tx_packets = pdev->stats.tx_i.rcvd.num;
  6297. stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
  6298. stats->tx_errors = pdev->stats.tx.tx_failed +
  6299. pdev->stats.tx_i.dropped.dropped_pkt.num;
  6300. stats->tx_dropped = stats->tx_errors;
  6301. stats->rx_packets = pdev->stats.rx.unicast.num +
  6302. pdev->stats.rx.multicast.num +
  6303. pdev->stats.rx.bcast.num;
  6304. stats->rx_bytes = pdev->stats.rx.unicast.bytes +
  6305. pdev->stats.rx.multicast.bytes +
  6306. pdev->stats.rx.bcast.bytes;
  6307. stats->rx_errors = pdev->stats.err.desc_alloc_fail +
  6308. pdev->stats.err.ip_csum_err +
  6309. pdev->stats.err.tcp_udp_csum_err +
  6310. pdev->stats.rx.err.mic_err +
  6311. pdev->stats.rx.err.decrypt_err +
  6312. pdev->stats.err.rxdma_error +
  6313. pdev->stats.err.reo_error;
  6314. stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
  6315. pdev->stats.dropped.mec +
  6316. pdev->stats.dropped.mesh_filter +
  6317. pdev->stats.dropped.wifi_parse +
  6318. pdev->stats.dropped.mon_rx_drop +
  6319. pdev->stats.dropped.mon_radiotap_update_err;
  6320. }
  6321. /**
  6322. * dp_get_device_stats() - get interface level packet stats
  6323. * @soc: soc handle
  6324. * @id : vdev_id or pdev_id based on type
  6325. * @stats: cdp network device stats structure
  6326. * @type: device type pdev/vdev
  6327. *
  6328. * Return: QDF_STATUS
  6329. */
  6330. static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc, uint8_t id,
  6331. struct cdp_dev_stats *stats,
  6332. uint8_t type)
  6333. {
  6334. switch (type) {
  6335. case UPDATE_VDEV_STATS:
  6336. return dp_vdev_getstats(
  6337. (struct cdp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(
  6338. (struct dp_soc *)soc, id), stats);
  6339. case UPDATE_PDEV_STATS:
  6340. {
  6341. struct dp_pdev *pdev =
  6342. dp_get_pdev_from_soc_pdev_id_wifi3(
  6343. (struct dp_soc *)soc,
  6344. id);
  6345. if (pdev) {
  6346. dp_pdev_getstats((struct cdp_pdev *)pdev,
  6347. stats);
  6348. return QDF_STATUS_SUCCESS;
  6349. }
  6350. }
  6351. break;
  6352. default:
  6353. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6354. "apstats cannot be updated for this input "
  6355. "type %d", type);
  6356. break;
  6357. }
  6358. return QDF_STATUS_E_FAILURE;
  6359. }
  6360. const
  6361. char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
  6362. {
  6363. switch (ring_type) {
  6364. case REO_DST:
  6365. return "Reo_dst";
  6366. case REO_EXCEPTION:
  6367. return "Reo_exception";
  6368. case REO_CMD:
  6369. return "Reo_cmd";
  6370. case REO_REINJECT:
  6371. return "Reo_reinject";
  6372. case REO_STATUS:
  6373. return "Reo_status";
  6374. case WBM2SW_RELEASE:
  6375. return "wbm2sw_release";
  6376. case TCL_DATA:
  6377. return "tcl_data";
  6378. case TCL_CMD:
  6379. return "tcl_cmd";
  6380. case TCL_STATUS:
  6381. return "tcl_status";
  6382. case SW2WBM_RELEASE:
  6383. return "sw2wbm_release";
  6384. case RXDMA_BUF:
  6385. return "Rxdma_buf";
  6386. case RXDMA_DST:
  6387. return "Rxdma_dst";
  6388. case RXDMA_MONITOR_BUF:
  6389. return "Rxdma_monitor_buf";
  6390. case RXDMA_MONITOR_DESC:
  6391. return "Rxdma_monitor_desc";
  6392. case RXDMA_MONITOR_STATUS:
  6393. return "Rxdma_monitor_status";
  6394. default:
  6395. dp_err("Invalid ring type");
  6396. break;
  6397. }
  6398. return "Invalid";
  6399. }
  6400. /*
  6401. * dp_print_napi_stats(): NAPI stats
  6402. * @soc - soc handle
  6403. */
  6404. void dp_print_napi_stats(struct dp_soc *soc)
  6405. {
  6406. hif_print_napi_stats(soc->hif_handle);
  6407. }
  6408. /**
  6409. * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
  6410. * @vdev: DP_VDEV handle
  6411. *
  6412. * Return: QDF_STATUS
  6413. */
  6414. static inline QDF_STATUS
  6415. dp_txrx_host_stats_clr(struct dp_vdev *vdev)
  6416. {
  6417. struct dp_peer *peer = NULL;
  6418. if (!vdev || !vdev->pdev)
  6419. return QDF_STATUS_E_FAILURE;
  6420. DP_STATS_CLR(vdev->pdev);
  6421. DP_STATS_CLR(vdev->pdev->soc);
  6422. DP_STATS_CLR(vdev);
  6423. hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
  6424. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  6425. if (!peer)
  6426. return QDF_STATUS_E_FAILURE;
  6427. DP_STATS_CLR(peer);
  6428. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  6429. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  6430. &peer->stats, peer->peer_ids[0],
  6431. UPDATE_PEER_STATS, vdev->pdev->pdev_id);
  6432. #endif
  6433. }
  6434. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  6435. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  6436. &vdev->stats, vdev->vdev_id,
  6437. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  6438. #endif
  6439. return QDF_STATUS_SUCCESS;
  6440. }
  6441. /*
  6442. * dp_get_host_peer_stats()- function to print peer stats
  6443. * @soc: dp_soc handle
  6444. * @mac_addr: mac address of the peer
  6445. *
  6446. * Return: QDF_STATUS
  6447. */
  6448. static QDF_STATUS
  6449. dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
  6450. {
  6451. QDF_STATUS status = QDF_STATUS_SUCCESS;
  6452. struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
  6453. mac_addr, 0,
  6454. DP_VDEV_ALL);
  6455. if (!peer || peer->delete_in_progress) {
  6456. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6457. "%s: Invalid peer\n", __func__);
  6458. status = QDF_STATUS_E_FAILURE;
  6459. goto fail;
  6460. }
  6461. dp_print_peer_stats(peer);
  6462. dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
  6463. fail:
  6464. if (peer)
  6465. dp_peer_unref_delete(peer);
  6466. return status;
  6467. }
  6468. /**
  6469. * dp_txrx_stats_help() - Helper function for Txrx_Stats
  6470. *
  6471. * Return: None
  6472. */
  6473. static void dp_txrx_stats_help(void)
  6474. {
  6475. dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
  6476. dp_info("stats_option:");
  6477. dp_info(" 1 -- HTT Tx Statistics");
  6478. dp_info(" 2 -- HTT Rx Statistics");
  6479. dp_info(" 3 -- HTT Tx HW Queue Statistics");
  6480. dp_info(" 4 -- HTT Tx HW Sched Statistics");
  6481. dp_info(" 5 -- HTT Error Statistics");
  6482. dp_info(" 6 -- HTT TQM Statistics");
  6483. dp_info(" 7 -- HTT TQM CMDQ Statistics");
  6484. dp_info(" 8 -- HTT TX_DE_CMN Statistics");
  6485. dp_info(" 9 -- HTT Tx Rate Statistics");
  6486. dp_info(" 10 -- HTT Rx Rate Statistics");
  6487. dp_info(" 11 -- HTT Peer Statistics");
  6488. dp_info(" 12 -- HTT Tx SelfGen Statistics");
  6489. dp_info(" 13 -- HTT Tx MU HWQ Statistics");
  6490. dp_info(" 14 -- HTT RING_IF_INFO Statistics");
  6491. dp_info(" 15 -- HTT SRNG Statistics");
  6492. dp_info(" 16 -- HTT SFM Info Statistics");
  6493. dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
  6494. dp_info(" 18 -- HTT Peer List Details");
  6495. dp_info(" 20 -- Clear Host Statistics");
  6496. dp_info(" 21 -- Host Rx Rate Statistics");
  6497. dp_info(" 22 -- Host Tx Rate Statistics");
  6498. dp_info(" 23 -- Host Tx Statistics");
  6499. dp_info(" 24 -- Host Rx Statistics");
  6500. dp_info(" 25 -- Host AST Statistics");
  6501. dp_info(" 26 -- Host SRNG PTR Statistics");
  6502. dp_info(" 27 -- Host Mon Statistics");
  6503. dp_info(" 28 -- Host REO Queue Statistics");
  6504. dp_info(" 29 -- Host Soc cfg param Statistics");
  6505. dp_info(" 30 -- Host pdev cfg param Statistics");
  6506. }
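/*
 * Example (editorial, not in the original source): with the help text above,
 *     iwpriv wlan0 txrx_stats 24 0
 * requests option 24 (Host Rx Statistics) for mac_id 0.
 */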
  6507. /**
6508. * dp_print_host_stats() - Function to print the stats aggregated at host
6509. * @vdev: DP_VDEV handle
6510. * @req: host stats request
6511. *
6512. * Return: 0 always; prints an error and the help text for an invalid stats type
  6513. */
  6514. static int
  6515. dp_print_host_stats(struct dp_vdev *vdev,
  6516. struct cdp_txrx_stats_req *req)
  6517. {
  6518. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  6519. enum cdp_host_txrx_stats type =
  6520. dp_stats_mapping_table[req->stats][STATS_HOST];
  6521. dp_aggregate_pdev_stats(pdev);
  6522. switch (type) {
  6523. case TXRX_CLEAR_STATS:
  6524. dp_txrx_host_stats_clr(vdev);
  6525. break;
  6526. case TXRX_RX_RATE_STATS:
  6527. dp_print_rx_rates(vdev);
  6528. break;
  6529. case TXRX_TX_RATE_STATS:
  6530. dp_print_tx_rates(vdev);
  6531. break;
  6532. case TXRX_TX_HOST_STATS:
  6533. dp_print_pdev_tx_stats(pdev);
  6534. dp_print_soc_tx_stats(pdev->soc);
  6535. break;
  6536. case TXRX_RX_HOST_STATS:
  6537. dp_print_pdev_rx_stats(pdev);
  6538. dp_print_soc_rx_stats(pdev->soc);
  6539. break;
  6540. case TXRX_AST_STATS:
  6541. dp_print_ast_stats(pdev->soc);
  6542. dp_print_peer_table(vdev);
  6543. break;
  6544. case TXRX_SRNG_PTR_STATS:
  6545. dp_print_ring_stats(pdev);
  6546. break;
  6547. case TXRX_RX_MON_STATS:
  6548. dp_print_pdev_rx_mon_stats(pdev);
  6549. break;
  6550. case TXRX_REO_QUEUE_STATS:
  6551. dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
  6552. req->peer_addr);
  6553. break;
  6554. case TXRX_SOC_CFG_PARAMS:
  6555. dp_print_soc_cfg_params(pdev->soc);
  6556. break;
  6557. case TXRX_PDEV_CFG_PARAMS:
  6558. dp_print_pdev_cfg_params(pdev);
  6559. break;
  6560. case TXRX_NAPI_STATS:
6561. dp_print_napi_stats(pdev->soc);
break;
  6562. case TXRX_SOC_INTERRUPT_STATS:
  6563. dp_print_soc_interrupt_stats(pdev->soc);
  6564. break;
  6565. default:
  6566. dp_info("Wrong Input For TxRx Host Stats");
  6567. dp_txrx_stats_help();
  6568. break;
  6569. }
  6570. return 0;
  6571. }
  6572. /*
  6573. * dp_ppdu_ring_reset()- Reset PPDU Stats ring
  6574. * @pdev: DP_PDEV handle
  6575. *
  6576. * Return: void
  6577. */
  6578. static void
  6579. dp_ppdu_ring_reset(struct dp_pdev *pdev)
  6580. {
  6581. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  6582. int mac_id;
  6583. qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
  6584. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6585. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6586. pdev->pdev_id);
  6587. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6588. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6589. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6590. }
  6591. }
  6592. /*
  6593. * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
  6594. * @pdev: DP_PDEV handle
  6595. *
  6596. * Return: void
  6597. */
  6598. static void
  6599. dp_ppdu_ring_cfg(struct dp_pdev *pdev)
  6600. {
  6601. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  6602. int mac_id;
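/*
 * Editorial note: only the MPDU-start and PPDU start/end TLVs are requested
 * below; MSDU/packet TLVs stay disabled, so the monitor status ring carries
 * PPDU statistics without full packet capture. Packet headers are enabled
 * further down only for hw NAC monitoring or M-copy mode.
 */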
  6603. htt_tlv_filter.mpdu_start = 1;
  6604. htt_tlv_filter.msdu_start = 0;
  6605. htt_tlv_filter.packet = 0;
  6606. htt_tlv_filter.msdu_end = 0;
  6607. htt_tlv_filter.mpdu_end = 0;
  6608. htt_tlv_filter.attention = 0;
  6609. htt_tlv_filter.ppdu_start = 1;
  6610. htt_tlv_filter.ppdu_end = 1;
  6611. htt_tlv_filter.ppdu_end_user_stats = 1;
  6612. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  6613. htt_tlv_filter.ppdu_end_status_done = 1;
  6614. htt_tlv_filter.enable_fp = 1;
  6615. htt_tlv_filter.enable_md = 0;
  6616. if (pdev->neighbour_peers_added &&
  6617. pdev->soc->hw_nac_monitor_support) {
  6618. htt_tlv_filter.enable_md = 1;
  6619. htt_tlv_filter.packet_header = 1;
  6620. }
  6621. if (pdev->mcopy_mode) {
  6622. htt_tlv_filter.packet_header = 1;
  6623. htt_tlv_filter.enable_mo = 1;
  6624. }
  6625. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  6626. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  6627. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  6628. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  6629. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  6630. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  6631. if (pdev->neighbour_peers_added &&
  6632. pdev->soc->hw_nac_monitor_support)
  6633. htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
  6634. htt_tlv_filter.offset_valid = false;
  6635. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6636. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6637. pdev->pdev_id);
  6638. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6639. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6640. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6641. }
  6642. }
  6643. /*
6644. * is_ppdu_txrx_capture_enabled() - API to check whether any of the pktlog
6645. * ppdu stats, tx sniffer or m-copy capture modes is enabled.
6646. * @pdev: dp pdev handle.
6647. *
6648. * Return: true if none of the above modes is enabled, false otherwise.
  6649. */
  6650. static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
  6651. {
  6652. if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
  6653. !pdev->mcopy_mode)
  6654. return true;
  6655. else
  6656. return false;
  6657. }
  6658. /*
6659. * dp_set_bpr_enable() - API to enable/disable bpr feature
6660. * @pdev: DP_PDEV handle.
6661. * @val: Provided value.
6662. *
6663. * Return: QDF_STATUS
  6664. */
  6665. static QDF_STATUS
  6666. dp_set_bpr_enable(struct dp_pdev *pdev, int val)
  6667. {
  6668. switch (val) {
  6669. case CDP_BPR_DISABLE:
  6670. pdev->bpr_enable = CDP_BPR_DISABLE;
  6671. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6672. !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
  6673. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6674. } else if (pdev->enhanced_stats_en &&
  6675. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6676. !pdev->pktlog_ppdu_stats) {
  6677. dp_h2t_cfg_stats_msg_send(pdev,
  6678. DP_PPDU_STATS_CFG_ENH_STATS,
  6679. pdev->pdev_id);
  6680. }
  6681. break;
  6682. case CDP_BPR_ENABLE:
  6683. pdev->bpr_enable = CDP_BPR_ENABLE;
  6684. if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
  6685. !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
  6686. dp_h2t_cfg_stats_msg_send(pdev,
  6687. DP_PPDU_STATS_CFG_BPR,
  6688. pdev->pdev_id);
  6689. } else if (pdev->enhanced_stats_en &&
  6690. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6691. !pdev->pktlog_ppdu_stats) {
  6692. dp_h2t_cfg_stats_msg_send(pdev,
  6693. DP_PPDU_STATS_CFG_BPR_ENH,
  6694. pdev->pdev_id);
  6695. } else if (pdev->pktlog_ppdu_stats) {
  6696. dp_h2t_cfg_stats_msg_send(pdev,
  6697. DP_PPDU_STATS_CFG_BPR_PKTLOG,
  6698. pdev->pdev_id);
  6699. }
  6700. break;
  6701. default:
  6702. break;
  6703. }
  6704. return QDF_STATUS_SUCCESS;
  6705. }
  6706. /*
6707. * dp_pdev_tid_stats_ingress_inc() - increment the pdev ingress-stack counter
  6708. * @pdev: pdev handle
  6709. * @val: increase in value
  6710. *
  6711. * Return: void
  6712. */
  6713. static void
  6714. dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
  6715. {
  6716. pdev->stats.tid_stats.ingress_stack += val;
  6717. }
  6718. /*
6719. * dp_pdev_tid_stats_osif_drop() - increment the pdev OSIF-drop counter
  6720. * @pdev: pdev handle
  6721. * @val: increase in value
  6722. *
  6723. * Return: void
  6724. */
  6725. static void
  6726. dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
  6727. {
  6728. pdev->stats.tid_stats.osif_drop += val;
  6729. }
  6730. /*
  6731. * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6732. * @pdev_handle: DP_PDEV handle
6733. * @val: user provided value
6734. *
6735. * Return: QDF_STATUS
  6736. */
  6737. static QDF_STATUS
  6738. dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
  6739. {
  6740. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6741. QDF_STATUS status = QDF_STATUS_SUCCESS;
  6742. if (pdev->mcopy_mode)
  6743. dp_reset_monitor_mode(pdev_handle);
  6744. switch (val) {
  6745. case 0:
  6746. pdev->tx_sniffer_enable = 0;
  6747. pdev->monitor_configured = false;
  6748. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6749. !pdev->bpr_enable) {
  6750. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6751. dp_ppdu_ring_reset(pdev);
  6752. } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
  6753. dp_h2t_cfg_stats_msg_send(pdev,
  6754. DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6755. } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
  6756. dp_h2t_cfg_stats_msg_send(pdev,
  6757. DP_PPDU_STATS_CFG_BPR_ENH,
  6758. pdev->pdev_id);
  6759. } else {
  6760. dp_h2t_cfg_stats_msg_send(pdev,
  6761. DP_PPDU_STATS_CFG_BPR,
  6762. pdev->pdev_id);
  6763. }
  6764. break;
  6765. case 1:
  6766. pdev->tx_sniffer_enable = 1;
  6767. pdev->monitor_configured = false;
  6768. if (!pdev->pktlog_ppdu_stats)
  6769. dp_h2t_cfg_stats_msg_send(pdev,
  6770. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6771. break;
  6772. case 2:
  6773. if (pdev->monitor_vdev) {
  6774. status = QDF_STATUS_E_RESOURCES;
  6775. break;
  6776. }
  6777. pdev->mcopy_mode = 1;
  6778. dp_pdev_configure_monitor_rings(pdev);
  6779. pdev->monitor_configured = true;
  6780. pdev->tx_sniffer_enable = 0;
  6781. if (!pdev->pktlog_ppdu_stats)
  6782. dp_h2t_cfg_stats_msg_send(pdev,
  6783. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6784. break;
  6785. default:
  6786. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6787. "Invalid value");
  6788. break;
  6789. }
  6790. return status;
  6791. }
  6792. /*
6793. * dp_enable_enhanced_stats() - API to enable enhanced statistics
  6794. * @soc_handle: DP_SOC handle
  6795. * @pdev_id: id of DP_PDEV handle
  6796. *
  6797. * Return: QDF_STATUS
  6798. */
  6799. static QDF_STATUS
  6800. dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
  6801. {
  6802. struct dp_pdev *pdev =
  6803. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  6804. pdev_id);
  6805. if (!pdev)
  6806. return QDF_STATUS_E_FAILURE;
  6807. if (pdev->enhanced_stats_en == 0)
  6808. dp_cal_client_timer_start(pdev->cal_client_ctx);
  6809. pdev->enhanced_stats_en = 1;
  6810. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6811. !pdev->monitor_vdev)
  6812. dp_ppdu_ring_cfg(pdev);
  6813. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6814. dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6815. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6816. dp_h2t_cfg_stats_msg_send(pdev,
  6817. DP_PPDU_STATS_CFG_BPR_ENH,
  6818. pdev->pdev_id);
  6819. }
  6820. return QDF_STATUS_SUCCESS;
  6821. }
  6822. /*
6823. * dp_disable_enhanced_stats() - API to disable enhanced statistics
6824. * @soc: the soc handle
6825. * @pdev_id: pdev_id of pdev
6826. *
6827. * Return: QDF_STATUS
  6828. */
  6829. static QDF_STATUS
  6830. dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
  6831. {
  6832. struct dp_pdev *pdev =
  6833. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  6834. pdev_id);
  6835. if (!pdev)
  6836. return QDF_STATUS_E_FAILURE;
  6837. if (pdev->enhanced_stats_en == 1)
  6838. dp_cal_client_timer_stop(pdev->cal_client_ctx);
  6839. pdev->enhanced_stats_en = 0;
  6840. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6841. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6842. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6843. dp_h2t_cfg_stats_msg_send(pdev,
  6844. DP_PPDU_STATS_CFG_BPR,
  6845. pdev->pdev_id);
  6846. }
  6847. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6848. !pdev->monitor_vdev)
  6849. dp_ppdu_ring_reset(pdev);
  6850. return QDF_STATUS_SUCCESS;
  6851. }
  6852. /*
6853. * dp_get_fw_peer_stats() - function to request peer stats from FW
  6854. * @soc: soc handle
  6855. * @pdev_id : id of the pdev handle
  6856. * @mac_addr: mac address of the peer
  6857. * @cap: Type of htt stats requested
  6858. * @is_wait: if set, wait on completion from firmware response
  6859. *
6860. * Currently supporting only MAC ID based requests. Valid @cap values:
  6861. * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
  6862. * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
  6863. * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
  6864. *
  6865. * Return: QDF_STATUS
  6866. */
  6867. static QDF_STATUS
  6868. dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
  6869. uint8_t *mac_addr,
  6870. uint32_t cap, uint32_t is_wait)
  6871. {
  6872. int i;
  6873. uint32_t config_param0 = 0;
  6874. uint32_t config_param1 = 0;
  6875. uint32_t config_param2 = 0;
  6876. uint32_t config_param3 = 0;
  6877. struct dp_pdev *pdev =
  6878. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  6879. pdev_id);
  6880. if (!pdev)
  6881. return QDF_STATUS_E_FAILURE;
  6882. HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
  6883. config_param0 |= (1 << (cap + 1));
  6884. for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
  6885. config_param1 |= (1 << i);
  6886. }
  6887. config_param2 |= (mac_addr[0] & 0x000000ff);
  6888. config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
  6889. config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
  6890. config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
  6891. config_param3 |= (mac_addr[4] & 0x000000ff);
  6892. config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
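/*
 * Worked example (editorial): for peer MAC 00:11:22:33:44:55 the packing
 * above gives config_param2 = 0x33221100 (bytes 0-3) and
 * config_param3 = 0x00005544 (bytes 4-5), i.e. the address is laid out
 * little-endian across the two words.
 */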
  6893. if (is_wait) {
  6894. qdf_event_reset(&pdev->fw_peer_stats_event);
  6895. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6896. config_param0, config_param1,
  6897. config_param2, config_param3,
  6898. 0, 1, 0);
  6899. qdf_wait_single_event(&pdev->fw_peer_stats_event,
  6900. DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
  6901. } else {
  6902. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6903. config_param0, config_param1,
  6904. config_param2, config_param3,
  6905. 0, 0, 0);
  6906. }
  6907. return QDF_STATUS_SUCCESS;
  6908. }
  6909. /* This struct definition will be removed from here
6910. * once it gets added to the FW headers */
  6911. struct httstats_cmd_req {
  6912. uint32_t config_param0;
  6913. uint32_t config_param1;
  6914. uint32_t config_param2;
  6915. uint32_t config_param3;
  6916. int cookie;
  6917. u_int8_t stats_id;
  6918. };
  6919. /*
6920. * dp_get_htt_stats: function to process the httstats request
  6921. * @soc: DP soc handle
  6922. * @pdev_id: id of pdev handle
  6923. * @data: pointer to request data
  6924. * @data_len: length for request data
  6925. *
  6926. * return: QDF_STATUS
  6927. */
  6928. static QDF_STATUS
  6929. dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
  6930. uint32_t data_len)
  6931. {
  6932. struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
  6933. struct dp_pdev *pdev =
  6934. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  6935. pdev_id);
  6936. if (!pdev)
  6937. return QDF_STATUS_E_FAILURE;
  6938. QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
  6939. dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
  6940. req->config_param0, req->config_param1,
  6941. req->config_param2, req->config_param3,
  6942. req->cookie, 0, 0);
  6943. return QDF_STATUS_SUCCESS;
  6944. }
  6945. /*
  6946. * dp_set_pdev_param: function to set parameters in pdev
  6947. * @pdev_handle: DP pdev handle
  6948. * @param: parameter type to be set
  6949. * @val: value of parameter to be set
  6950. *
  6951. * Return: 0 for success. nonzero for failure.
  6952. */
  6953. static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
  6954. enum cdp_pdev_param_type param,
  6955. uint32_t val)
  6956. {
  6957. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6958. switch (param) {
  6959. case CDP_CONFIG_DEBUG_SNIFFER:
  6960. return dp_config_debug_sniffer(pdev_handle, val);
  6961. case CDP_CONFIG_BPR_ENABLE:
  6962. return dp_set_bpr_enable(pdev, val);
  6963. case CDP_CONFIG_PRIMARY_RADIO:
  6964. pdev->is_primary = val;
  6965. break;
  6966. case CDP_CONFIG_CAPTURE_LATENCY:
  6967. if (val == 1)
  6968. pdev->latency_capture_enable = true;
  6969. else
  6970. pdev->latency_capture_enable = false;
  6971. break;
  6972. case CDP_INGRESS_STATS:
  6973. dp_pdev_tid_stats_ingress_inc(pdev, val);
  6974. break;
  6975. case CDP_OSIF_DROP:
  6976. dp_pdev_tid_stats_osif_drop(pdev, val);
  6977. break;
  6978. case CDP_CONFIG_ENH_RX_CAPTURE:
  6979. return dp_config_enh_rx_capture(pdev_handle, val);
  6980. case CDP_CONFIG_TX_CAPTURE:
  6981. return dp_config_enh_tx_capture(pdev_handle, val);
  6982. default:
  6983. return QDF_STATUS_E_INVAL;
  6984. }
  6985. return QDF_STATUS_SUCCESS;
  6986. }
  6987. /*
  6988. * dp_calculate_delay_stats: function to get rx delay stats
  6989. * @vdev_handle: DP vdev handle
  6990. * @nbuf: skb
  6991. *
  6992. * Return: void
  6993. */
  6994. static void dp_calculate_delay_stats(struct cdp_vdev *vdev_handle,
  6995. qdf_nbuf_t nbuf)
  6996. {
  6997. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6998. dp_rx_compute_delay(vdev, nbuf);
  6999. }
  7000. /*
7001. * dp_get_vdev_param: function to get parameters from vdev
* @vdev_handle: DP vdev handle
7002. * @param: parameter type to get value
7003. *
7004. * return: requested parameter value, -1 for an unknown parameter type
  7005. */
  7006. static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
  7007. enum cdp_vdev_param_type param)
  7008. {
  7009. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7010. uint32_t val;
  7011. switch (param) {
  7012. case CDP_ENABLE_WDS:
  7013. val = vdev->wds_enabled;
  7014. break;
  7015. case CDP_ENABLE_MEC:
  7016. val = vdev->mec_enabled;
  7017. break;
  7018. case CDP_ENABLE_DA_WAR:
  7019. val = vdev->pdev->soc->da_war_enabled;
  7020. break;
  7021. default:
  7022. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7023. "param value %d is wrong\n",
  7024. param);
  7025. val = -1;
  7026. break;
  7027. }
  7028. return val;
  7029. }
  7030. /*
  7031. * dp_set_vdev_param: function to set parameters in vdev
  7032. * @param: parameter type to be set
  7033. * @val: value of parameter to be set
  7034. *
  7035. * return: void
  7036. */
  7037. static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
  7038. enum cdp_vdev_param_type param, uint32_t val)
  7039. {
  7040. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7041. switch (param) {
  7042. case CDP_ENABLE_WDS:
  7043. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7044. "wds_enable %d for vdev(%pK) id(%d)\n",
  7045. val, vdev, vdev->vdev_id);
  7046. vdev->wds_enabled = val;
  7047. break;
  7048. case CDP_ENABLE_MEC:
  7049. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7050. "mec_enable %d for vdev(%pK) id(%d)\n",
  7051. val, vdev, vdev->vdev_id);
  7052. vdev->mec_enabled = val;
  7053. break;
  7054. case CDP_ENABLE_DA_WAR:
  7055. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7056. "da_war_enable %d for vdev(%pK) id(%d)\n",
  7057. val, vdev, vdev->vdev_id);
  7058. vdev->pdev->soc->da_war_enabled = val;
  7059. dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
  7060. vdev->pdev->soc));
  7061. break;
  7062. case CDP_ENABLE_NAWDS:
  7063. vdev->nawds_enabled = val;
  7064. break;
  7065. case CDP_ENABLE_MCAST_EN:
  7066. vdev->mcast_enhancement_en = val;
  7067. break;
  7068. case CDP_ENABLE_PROXYSTA:
  7069. vdev->proxysta_vdev = val;
  7070. break;
  7071. case CDP_UPDATE_TDLS_FLAGS:
  7072. vdev->tdls_link_connected = val;
  7073. break;
  7074. case CDP_CFG_WDS_AGING_TIMER:
  7075. if (val == 0)
  7076. qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
  7077. else if (val != vdev->wds_aging_timer_val)
  7078. qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
  7079. vdev->wds_aging_timer_val = val;
  7080. break;
  7081. case CDP_ENABLE_AP_BRIDGE:
  7082. if (wlan_op_mode_sta != vdev->opmode)
  7083. vdev->ap_bridge_enabled = val;
  7084. else
  7085. vdev->ap_bridge_enabled = false;
  7086. break;
  7087. case CDP_ENABLE_CIPHER:
  7088. vdev->sec_type = val;
  7089. break;
  7090. case CDP_ENABLE_QWRAP_ISOLATION:
  7091. vdev->isolation_vdev = val;
  7092. break;
  7093. case CDP_UPDATE_MULTIPASS:
  7094. vdev->multipass_en = val;
  7095. break;
  7096. default:
  7097. break;
  7098. }
  7099. dp_tx_vdev_update_search_flags(vdev);
  7100. }
  7101. /**
  7102. * dp_peer_set_nawds: set nawds bit in peer
  7103. * @peer_handle: pointer to peer
  7104. * @value: enable/disable nawds
  7105. *
  7106. * return: void
  7107. */
  7108. static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
  7109. {
  7110. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7111. peer->nawds_enabled = value;
  7112. }
  7113. /**
  7114. * dp_peer_set_tx_capture_enabled: Set tx_cap_enabled bit in peer
  7115. * @peer_handle: Peer handle
  7116. * @value: Enable/disable setting for tx_cap_enabled
  7117. *
  7118. * Return: None
  7119. */
  7120. static void
  7121. dp_peer_set_tx_capture_enabled(struct cdp_peer *peer_handle, bool value)
  7122. {
  7123. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7124. peer->tx_cap_enabled = value;
  7125. }
  7126. /**
  7127. * dp_peer_set_rx_capture_enabled: Set rx_cap_enabled bit in peer
  7128. * @peer_handle: Peer handle
  7129. * @value: Enable/disable setting for rx_cap_enabled
  7130. *
  7131. * Return: None
  7132. */
  7133. static void
  7134. dp_peer_set_rx_capture_enabled(struct cdp_peer *peer_handle, bool value)
  7135. {
  7136. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7137. peer->rx_cap_enabled = value;
  7138. }
  7139. /**
7140. * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
* @pdev: DP pdev handle
7141. * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
7142. * @is_tx_pkt_cap_enable: enable/disable Tx packet capture in monitor mode
7143. * @peer_mac: MAC address for which the above need to be enabled/disabled
7144. *
7145. * Return: QDF_STATUS_SUCCESS if the flags are updated, QDF_STATUS_E_FAILURE
* if the peer is not found
  7146. */
  7147. QDF_STATUS
  7148. dp_peer_update_pkt_capture_params(struct cdp_pdev *pdev,
  7149. bool is_rx_pkt_cap_enable,
  7150. bool is_tx_pkt_cap_enable,
  7151. uint8_t *peer_mac)
  7152. {
  7153. struct dp_peer *peer;
  7154. peer = (struct dp_peer *)dp_find_peer_by_addr(pdev,
  7155. peer_mac);
  7156. if (!peer) {
  7157. dp_err("Invalid Peer");
  7158. return QDF_STATUS_E_FAILURE;
  7159. }
  7160. dp_peer_set_rx_capture_enabled((struct cdp_peer *)peer,
  7161. is_rx_pkt_cap_enable);
  7162. dp_peer_set_tx_capture_enabled((struct cdp_peer *)peer,
  7163. is_tx_pkt_cap_enable);
  7164. return QDF_STATUS_SUCCESS;
  7165. }
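/*
 * Illustrative usage (editorial, hypothetical caller): enable Tx-only
 * capture for a given peer while leaving Rx capture off:
 *
 *     dp_peer_update_pkt_capture_params(pdev, false, true, peer_mac);
 */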
  7166. /*
  7167. * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
  7168. * @soc: DP_SOC handle
  7169. * @vdev_id: id of DP_VDEV handle
  7170. * @map_id:ID of map that needs to be updated
  7171. *
  7172. * Return: QDF_STATUS
  7173. */
  7174. static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle soc,
  7175. uint8_t vdev_id,
  7176. uint8_t map_id)
  7177. {
  7178. struct dp_vdev *vdev =
  7179. dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
  7180. vdev_id);
  7181. if (vdev) {
  7182. vdev->dscp_tid_map_id = map_id;
  7183. return QDF_STATUS_SUCCESS;
  7184. }
  7185. return QDF_STATUS_E_FAILURE;
  7186. }
  7187. #ifdef DP_RATETABLE_SUPPORT
  7188. static int dp_txrx_get_ratekbps(int preamb, int mcs,
  7189. int htflag, int gintval)
  7190. {
  7191. uint32_t rix;
  7192. uint16_t ratecode;
  7193. return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
  7194. (uint8_t)preamb, 1, &rix, &ratecode);
  7195. }
  7196. #else
  7197. static int dp_txrx_get_ratekbps(int preamb, int mcs,
  7198. int htflag, int gintval)
  7199. {
  7200. return 0;
  7201. }
  7202. #endif
  7203. /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
  7204. * @soc: DP soc handle
  7205. * @pdev_id: id of DP pdev handle
  7206. * @pdev_stats: buffer to copy to
  7207. *
  7208. * return : status success/failure
  7209. */
  7210. static QDF_STATUS
  7211. dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
  7212. struct cdp_pdev_stats *pdev_stats)
  7213. {
  7214. struct dp_pdev *pdev =
  7215. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  7216. pdev_id);
  7217. if (!pdev)
  7218. return QDF_STATUS_E_FAILURE;
  7219. dp_aggregate_pdev_stats(pdev);
  7220. qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
  7221. return QDF_STATUS_SUCCESS;
  7222. }
  7223. /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
  7224. * @vdev_handle: DP vdev handle
  7225. * @buf: buffer containing specific stats structure
  7226. *
  7227. * Returns: void
  7228. */
  7229. static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
  7230. void *buf)
  7231. {
  7232. struct cdp_tx_ingress_stats *host_stats = NULL;
  7233. if (!buf) {
  7234. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7235. "Invalid host stats buf");
  7236. return;
  7237. }
  7238. host_stats = (struct cdp_tx_ingress_stats *)buf;
  7239. DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
  7240. host_stats->mcast_en.mcast_pkt.num,
  7241. host_stats->mcast_en.mcast_pkt.bytes);
  7242. DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
  7243. host_stats->mcast_en.dropped_map_error);
  7244. DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
  7245. host_stats->mcast_en.dropped_self_mac);
  7246. DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
  7247. host_stats->mcast_en.dropped_send_fail);
  7248. DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
  7249. host_stats->mcast_en.ucast);
  7250. DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
  7251. host_stats->mcast_en.fail_seg_alloc);
  7252. DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
  7253. host_stats->mcast_en.clone_fail);
  7254. }
  7255. /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
  7256. * @soc: DP soc handle
  7257. * @vdev_id: id of DP vdev handle
  7258. * @buf: buffer containing specific stats structure
  7259. * @stats_id: stats type
  7260. *
  7261. * Returns: QDF_STATUS
  7262. */
  7263. static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc,
  7264. uint8_t vdev_id,
  7265. void *buf,
  7266. uint16_t stats_id)
  7267. {
  7268. struct dp_vdev *vdev =
  7269. dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
  7270. vdev_id);
  7271. if (!vdev) {
  7272. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7273. "Invalid vdev handle");
  7274. return QDF_STATUS_E_FAILURE;
  7275. }
  7276. switch (stats_id) {
  7277. case DP_VDEV_STATS_PKT_CNT_ONLY:
  7278. break;
  7279. case DP_VDEV_STATS_TX_ME:
  7280. dp_txrx_update_vdev_me_stats(vdev, buf);
  7281. break;
  7282. default:
  7283. qdf_info("Invalid stats_id %d", stats_id);
  7284. break;
  7285. }
  7286. return QDF_STATUS_SUCCESS;
  7287. }
  7288. /* dp_txrx_get_peer_stats - will return cdp_peer_stats
  7289. * @soc: soc handle
  7290. * @vdev_id: id of vdev handle
  7291. * @peer_mac: mac of DP_PEER handle
  7292. * @peer_stats: buffer to copy to
  7293. * return : status success/failure
  7294. */
  7295. static QDF_STATUS
  7296. dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
  7297. uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
  7298. {
  7299. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7300. struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
  7301. peer_mac, 0, vdev_id);
  7302. if (!peer || peer->delete_in_progress) {
  7303. status = QDF_STATUS_E_FAILURE;
  7304. goto fail;
  7305. } else
  7306. qdf_mem_copy(peer_stats, &peer->stats,
  7307. sizeof(struct cdp_peer_stats));
  7308. fail:
  7309. if (peer)
  7310. dp_peer_unref_delete(peer);
  7311. return status;
  7312. }
  7313. /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
  7314. * @soc: soc handle
  7315. * @vdev_id: id of vdev handle
  7316. * @peer_mac: mac of DP_PEER handle
  7317. *
  7318. * return : QDF_STATUS
  7319. */
  7320. static QDF_STATUS
  7321. dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
  7322. uint8_t *peer_mac)
  7323. {
  7324. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7325. struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
  7326. peer_mac, 0, vdev_id);
  7327. if (!peer || peer->delete_in_progress) {
  7328. status = QDF_STATUS_E_FAILURE;
  7329. goto fail;
  7330. }
  7331. qdf_mem_zero(&peer->stats, sizeof(peer->stats));
  7332. fail:
  7333. if (peer)
  7334. dp_peer_unref_delete(peer);
  7335. return status;
  7336. }
  7337. /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
7338. * @soc: DP soc handle
* @vdev_id: id of DP vdev handle
7339. * @buf: buffer for vdev stats
* @is_aggregate: whether to return aggregated stats
7340. *
7341. * return : 0 on success, 1 on failure
  7342. */
  7343. static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
  7344. void *buf, bool is_aggregate)
  7345. {
  7346. struct cdp_vdev_stats *vdev_stats;
  7347. struct dp_pdev *pdev;
  7348. struct dp_vdev *vdev =
  7349. dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
  7350. vdev_id);
  7351. if (!vdev)
  7352. return 1;
  7353. pdev = vdev->pdev;
  7354. if (!pdev)
  7355. return 1;
  7356. vdev_stats = (struct cdp_vdev_stats *)buf;
  7357. if (is_aggregate) {
  7358. qdf_spin_lock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
  7359. dp_aggregate_vdev_stats(vdev, buf);
  7360. qdf_spin_unlock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
  7361. } else {
  7362. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  7363. }
  7364. return 0;
  7365. }
  7366. /*
7367. * dp_get_total_per(): get total PER (packet error rate) percentage
  7368. * @soc: DP soc handle
  7369. * @pdev_id: id of DP_PDEV handle
  7370. *
  7371. * Return: % error rate using retries per packet and success packets
  7372. */
  7373. static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
  7374. {
  7375. struct dp_pdev *pdev =
  7376. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  7377. pdev_id);
  7378. if (!pdev)
  7379. return 0;
  7380. dp_aggregate_pdev_stats(pdev);
  7381. if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
  7382. return 0;
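/*
 * PER (%) = retries * 100 / (tx_success + retries); e.g. 20 retries and
 * 180 successfully transmitted MPDUs give a 10% PER. (Editorial example.)
 */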
  7383. return ((pdev->stats.tx.retries * 100) /
  7384. ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
  7385. }
  7386. /*
  7387. * dp_txrx_stats_publish(): publish pdev stats into a buffer
  7388. * @soc: DP soc handle
  7389. * @pdev_id: id of DP_PDEV handle
  7390. * @buf: to hold pdev_stats
  7391. *
  7392. * Return: int
  7393. */
  7394. static int
  7395. dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
  7396. struct cdp_stats_extd *buf)
  7397. {
  7398. struct cdp_txrx_stats_req req = {0,};
  7399. struct dp_pdev *pdev =
  7400. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  7401. pdev_id);
  7402. if (!pdev)
  7403. return TXRX_STATS_LEVEL_OFF;
  7404. dp_aggregate_pdev_stats(pdev);
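/*
 * Editorial note: the two HTT ext-stats requests below are fired with
 * cookie_val = 1 and the caller simply sleeps DP_MAX_SLEEP_TIME ms each
 * time to give the target a chance to deliver its response before
 * pdev->stats is copied out; this is a best-effort wait, not a hard
 * synchronisation with the FW response.
 */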
  7405. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
  7406. req.cookie_val = 1;
  7407. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7408. req.param1, req.param2, req.param3, 0,
  7409. req.cookie_val, 0);
  7410. msleep(DP_MAX_SLEEP_TIME);
  7411. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
  7412. req.cookie_val = 1;
  7413. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7414. req.param1, req.param2, req.param3, 0,
  7415. req.cookie_val, 0);
  7416. msleep(DP_MAX_SLEEP_TIME);
  7417. qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_stats_extd));
  7418. return TXRX_STATS_LEVEL;
  7419. }
  7420. /**
  7421. * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
  7422. * @soc: soc handle
  7423. * @pdev_id: id of DP_PDEV handle
  7424. * @map_id: ID of map that needs to be updated
  7425. * @tos: index value in map
  7426. * @tid: tid value passed by the user
  7427. *
  7428. * Return: QDF_STATUS
  7429. */
  7430. static QDF_STATUS
  7431. dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
  7432. uint8_t pdev_id,
  7433. uint8_t map_id,
  7434. uint8_t tos, uint8_t tid)
  7435. {
  7436. uint8_t dscp;
  7437. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7438. struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  7439. if (!pdev)
  7440. return QDF_STATUS_E_FAILURE;
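/*
 * DSCP is the upper six bits of the IP TOS field. Worked example
 * (editorial): tos 0xB8 >> 2 = 0x2E (DSCP 46, EF), assuming
 * DP_IP_DSCP_SHIFT is 2 and DP_IP_DSCP_MASK is 0x3F.
 */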
  7441. dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  7442. pdev->dscp_tid_map[map_id][dscp] = tid;
  7443. if (map_id < soc->num_hw_dscp_tid_map)
  7444. hal_tx_update_dscp_tid(soc->hal_soc, tid,
  7445. map_id, dscp);
  7446. else
  7447. return QDF_STATUS_E_FAILURE;
  7448. return QDF_STATUS_SUCCESS;
  7449. }
  7450. /**
  7451. * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
  7452. * @pdev_handle: pdev handle
  7453. * @val: hmmc-dscp flag value
  7454. *
  7455. * Return: void
  7456. */
  7457. static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
  7458. bool val)
  7459. {
  7460. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7461. pdev->hmmc_tid_override_en = val;
  7462. }
  7463. /**
  7464. * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
  7465. * @pdev_handle: pdev handle
  7466. * @tid: tid value
  7467. *
  7468. * Return: void
  7469. */
  7470. static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
  7471. uint8_t tid)
  7472. {
  7473. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7474. pdev->hmmc_tid = tid;
  7475. }
  7476. /**
  7477. * dp_fw_stats_process(): Process TxRX FW stats request
  7478. * @vdev_handle: DP VDEV handle
  7479. * @req: stats request
  7480. *
  7481. * return: int
  7482. */
  7483. static int dp_fw_stats_process(struct dp_vdev *vdev,
  7484. struct cdp_txrx_stats_req *req)
  7485. {
  7486. struct dp_pdev *pdev = NULL;
  7487. uint32_t stats = req->stats;
  7488. uint8_t mac_id = req->mac_id;
  7489. if (!vdev) {
  7490. DP_TRACE(NONE, "VDEV not found");
  7491. return 1;
  7492. }
  7493. pdev = vdev->pdev;
  7494. /*
7495. * For the HTT_DBG_EXT_STATS_RESET command, the FW expects param0 to
7496. * param3 to be configured according to the rule below:
  7497. *
  7498. * PARAM:
  7499. * - config_param0 : start_offset (stats type)
  7500. * - config_param1 : stats bmask from start offset
  7501. * - config_param2 : stats bmask from start offset + 32
  7502. * - config_param3 : stats bmask from start offset + 64
  7503. */
  7504. if (req->stats == CDP_TXRX_STATS_0) {
  7505. req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
  7506. req->param1 = 0xFFFFFFFF;
  7507. req->param2 = 0xFFFFFFFF;
  7508. req->param3 = 0xFFFFFFFF;
  7509. } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
  7510. req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
  7511. }
  7512. return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
  7513. req->param1, req->param2, req->param3,
  7514. 0, 0, mac_id);
  7515. }
  7516. /**
  7517. * dp_txrx_stats_request - function to map to firmware and host stats
  7518. * @soc: soc handle
  7519. * @vdev_id: virtual device ID
  7520. * @req: stats request
  7521. *
  7522. * Return: QDF_STATUS
  7523. */
  7524. static
  7525. QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
  7526. uint8_t vdev_id,
  7527. struct cdp_txrx_stats_req *req)
  7528. {
  7529. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
  7530. int host_stats;
  7531. int fw_stats;
  7532. enum cdp_stats stats;
  7533. int num_stats;
  7534. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
  7535. vdev_id);
  7536. if (!vdev || !req) {
  7537. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7538. "Invalid vdev/req instance");
  7539. return QDF_STATUS_E_INVAL;
  7540. }
  7541. if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
  7542. dp_err("Invalid mac id request");
  7543. return QDF_STATUS_E_INVAL;
  7544. }
  7545. stats = req->stats;
  7546. if (stats >= CDP_TXRX_MAX_STATS)
  7547. return QDF_STATUS_E_INVAL;
  7548. /*
  7549. * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
  7550. * has to be updated if new FW HTT stats added
  7551. */
  7552. if (stats > CDP_TXRX_STATS_HTT_MAX)
  7553. stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
  7554. num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
  7555. if (stats >= num_stats) {
  7556. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7557. "%s: Invalid stats option: %d", __func__, stats);
  7558. return QDF_STATUS_E_INVAL;
  7559. }
  7560. req->stats = stats;
  7561. fw_stats = dp_stats_mapping_table[stats][STATS_FW];
  7562. host_stats = dp_stats_mapping_table[stats][STATS_HOST];
  7563. dp_info("stats: %u fw_stats_type: %d host_stats: %d",
  7564. stats, fw_stats, host_stats);
  7565. if (fw_stats != TXRX_FW_STATS_INVALID) {
  7566. /* update request with FW stats type */
  7567. req->stats = fw_stats;
  7568. return dp_fw_stats_process(vdev, req);
  7569. }
  7570. if ((host_stats != TXRX_HOST_STATS_INVALID) &&
  7571. (host_stats <= TXRX_HOST_STATS_MAX))
  7572. return dp_print_host_stats(vdev, req);
  7573. else
  7574. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7575. "Wrong Input for TxRx Stats");
  7576. return QDF_STATUS_SUCCESS;
  7577. }
  7578. /*
  7579. * dp_txrx_dump_stats() - Dump statistics
7580. * @psoc: soc handle
* @value: Statistics option
* @level: verbosity level
  7581. */
  7582. static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
  7583. enum qdf_stats_verbosity_level level)
  7584. {
  7585. struct dp_soc *soc =
  7586. (struct dp_soc *)psoc;
  7587. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7588. if (!soc) {
  7589. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7590. "%s: soc is NULL", __func__);
  7591. return QDF_STATUS_E_INVAL;
  7592. }
  7593. switch (value) {
  7594. case CDP_TXRX_PATH_STATS:
  7595. dp_txrx_path_stats(soc);
  7596. dp_print_soc_interrupt_stats(soc);
  7597. break;
  7598. case CDP_RX_RING_STATS:
  7599. dp_print_per_ring_stats(soc);
  7600. break;
  7601. case CDP_TXRX_TSO_STATS:
  7602. dp_print_tso_stats(soc, level);
  7603. break;
  7604. case CDP_DUMP_TX_FLOW_POOL_INFO:
  7605. if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
  7606. cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
  7607. break;
  7608. case CDP_DP_NAPI_STATS:
  7609. dp_print_napi_stats(soc);
  7610. break;
  7611. case CDP_TXRX_DESC_STATS:
  7612. /* TODO: NOT IMPLEMENTED */
  7613. break;
  7614. default:
  7615. status = QDF_STATUS_E_INVAL;
  7616. break;
  7617. }
  7618. return status;
  7619. }
  7620. /**
7621. * dp_txrx_clear_dump_stats() - clear the dumped statistics
7622. * @soc_hdl: soc handle
* @pdev_id: id of pdev handle
7623. * @value: stats option
  7624. *
  7625. * Return: 0 - Success, non-zero - failure
  7626. */
  7627. static
  7628. QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  7629. uint8_t value)
  7630. {
  7631. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  7632. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7633. if (!soc) {
  7634. dp_err("%s: soc is NULL", __func__);
  7635. return QDF_STATUS_E_INVAL;
  7636. }
  7637. switch (value) {
  7638. case CDP_TXRX_TSO_STATS:
  7639. dp_txrx_clear_tso_stats(soc);
  7640. break;
  7641. default:
  7642. status = QDF_STATUS_E_INVAL;
  7643. break;
  7644. }
  7645. return status;
  7646. }
  7647. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7648. /**
  7649. * dp_update_flow_control_parameters() - API to store datapath
  7650. * config parameters
  7651. * @soc: soc handle
7652. * @params: ini parameter handle
  7653. *
  7654. * Return: void
  7655. */
  7656. static inline
  7657. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7658. struct cdp_config_params *params)
  7659. {
  7660. soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
  7661. params->tx_flow_stop_queue_threshold;
  7662. soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
  7663. params->tx_flow_start_queue_offset;
  7664. }
  7665. #else
  7666. static inline
  7667. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7668. struct cdp_config_params *params)
  7669. {
  7670. }
  7671. #endif
  7672. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  7673. /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
  7674. #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
  7675. /* Max packet limit for RX REAP Loop (dp_rx_process) */
  7676. #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
  7677. static
  7678. void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
  7679. struct cdp_config_params *params)
  7680. {
  7681. soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
  7682. params->tx_comp_loop_pkt_limit;
  7683. if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
  7684. soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
  7685. else
  7686. soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
  7687. soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
  7688. params->rx_reap_loop_pkt_limit;
  7689. if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
  7690. soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
  7691. else
  7692. soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
  7693. soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
  7694. params->rx_hp_oos_update_limit;
  7695. dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
  7696. soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
  7697. soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
  7698. soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
  7699. soc->wlan_cfg_ctx->rx_enable_eol_data_check,
  7700. soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
  7701. }
  7702. #else
  7703. static inline
  7704. void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
  7705. struct cdp_config_params *params)
  7706. { }
  7707. #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
  7708. /**
  7709. * dp_update_config_parameters() - API to store datapath
  7710. * config parameters
  7711. * @soc: soc handle
7712. * @params: ini parameter handle
  7713. *
  7714. * Return: status
  7715. */
  7716. static
  7717. QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
  7718. struct cdp_config_params *params)
  7719. {
  7720. struct dp_soc *soc = (struct dp_soc *)psoc;
  7721. if (!(soc)) {
  7722. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7723. "%s: Invalid handle", __func__);
  7724. return QDF_STATUS_E_INVAL;
  7725. }
  7726. soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
  7727. soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
  7728. soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
  7729. soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
  7730. params->tcp_udp_checksumoffload;
  7731. soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
  7732. soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
  7733. soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
  7734. dp_update_rx_soft_irq_limit_params(soc, params);
  7735. dp_update_flow_control_parameters(soc, params);
  7736. return QDF_STATUS_SUCCESS;
  7737. }
  7738. static struct cdp_wds_ops dp_ops_wds = {
  7739. .vdev_set_wds = dp_vdev_set_wds,
  7740. #ifdef WDS_VENDOR_EXTENSION
  7741. .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
  7742. .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
  7743. #endif
  7744. };
  7745. /*
  7746. * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7747. * @soc_hdl: datapath soc handle
7748. * @vdev_id: virtual interface id
7749. * @callback: callback function
7750. * @ctxt: callback context
  7751. *
  7752. */
  7753. static void
  7754. dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  7755. ol_txrx_data_tx_cb callback, void *ctxt)
  7756. {
  7757. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  7758. struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
  7759. if (!vdev)
  7760. return;
  7761. vdev->tx_non_std_data_callback.func = callback;
  7762. vdev->tx_non_std_data_callback.ctxt = ctxt;
  7763. }
  7764. /**
  7765. * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
  7766. * @soc: datapath soc handle
  7767. * @pdev_id: id of datapath pdev handle
  7768. *
  7769. * Return: opaque pointer to dp txrx handle
  7770. */
  7771. static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
  7772. {
  7773. struct dp_pdev *pdev =
  7774. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  7775. pdev_id);
  7776. if (qdf_unlikely(!pdev))
  7777. return NULL;
  7778. return pdev->dp_txrx_handle;
  7779. }
  7780. /**
  7781. * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
  7782. * @soc: datapath soc handle
  7783. * @pdev_id: id of datapath pdev handle
  7784. * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
  7785. *
  7786. * Return: void
  7787. */
  7788. static void
  7789. dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
  7790. void *dp_txrx_hdl)
  7791. {
  7792. struct dp_pdev *pdev =
  7793. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  7794. pdev_id);
  7795. if (!pdev)
  7796. return;
  7797. pdev->dp_txrx_handle = dp_txrx_hdl;
  7798. }
  7799. /**
  7800. * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
  7801. * @soc_handle: datapath soc handle
  7802. *
  7803. * Return: opaque pointer to external dp (non-core DP)
  7804. */
  7805. static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
  7806. {
  7807. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7808. return soc->external_txrx_handle;
  7809. }
  7810. /**
  7811. * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
  7812. * @soc_handle: datapath soc handle
  7813. * @txrx_handle: opaque pointer to external dp (non-core DP)
  7814. *
  7815. * Return: void
  7816. */
  7817. static void
  7818. dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
  7819. {
  7820. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7821. soc->external_txrx_handle = txrx_handle;
  7822. }
  7823. /**
  7824. * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
  7825. * @soc_hdl: datapath soc handle
  7826. * @pdev_id: id of the datapath pdev handle
  7827. * @lmac_id: lmac id
  7828. *
  7829. * Return: QDF_STATUS
  7830. */
  7831. static QDF_STATUS
  7832. dp_soc_map_pdev_to_lmac(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  7833. uint32_t lmac_id)
  7834. {
  7835. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  7836. struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
  7837. pdev_id);
  7838. if (qdf_unlikely(!pdev))
  7839. return QDF_STATUS_E_FAILURE;
  7840. pdev->lmac_id = lmac_id;
  7841. wlan_cfg_set_hw_macid(soc->wlan_cfg_ctx,
  7842. pdev_id,
  7843. (lmac_id + 1));
  7844. return QDF_STATUS_SUCCESS;
  7845. }
  7846. /**
  7847. * dp_soc_set_pdev_status_down() - set pdev down/up status
  7848. * @soc: datapath soc handle
  7849. * @pdev_id: id of datapath pdev handle
  7850. * @is_pdev_down: pdev down/up status
  7851. *
  7852. * Return: QDF_STATUS
  7853. */
  7854. static QDF_STATUS
  7855. dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
  7856. bool is_pdev_down)
  7857. {
  7858. struct dp_pdev *pdev =
  7859. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  7860. pdev_id);
  7861. if (!pdev)
  7862. return QDF_STATUS_E_FAILURE;
  7863. pdev->is_pdev_down = is_pdev_down;
  7864. return QDF_STATUS_SUCCESS;
  7865. }
  7866. /**
  7867. * dp_get_cfg_capabilities() - get dp capabilities
  7868. * @soc_handle: datapath soc handle
  7869. * @dp_caps: enum for dp capabilities
  7870. *
  7871. * Return: bool to determine if dp caps is enabled
  7872. */
  7873. static bool
  7874. dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
  7875. enum cdp_capabilities dp_caps)
  7876. {
  7877. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7878. return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
  7879. }
  7880. #ifdef FEATURE_AST
  7881. static QDF_STATUS
  7882. dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  7883. uint8_t *peer_mac)
  7884. {
  7885. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  7886. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7887. struct dp_peer *peer =
  7888. dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
  7889. /* Peer can be null for monitor vap mac address */
  7890. if (!peer || peer->delete_in_progress) {
  7891. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  7892. "%s: Invalid peer\n", __func__);
  7893. status = QDF_STATUS_E_FAILURE;
  7894. goto fail;
  7895. }
  7896. /*
7897. * For a BSS peer, a new peer is not created on alloc_node if a peer
7898. * with the same address already exists; instead the refcount of the
7899. * existing peer is increased. Correspondingly, in the delete path only
7900. * the refcount is decreased, and the peer is deleted only when all
7901. * references are released. So delete_in_progress should not be set
7902. * for a bss_peer unless only 3 references remain (the peer map
7903. * reference, the peer hash table reference and the local reference above).
  7904. */
  7905. if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 3)) {
  7906. status = QDF_STATUS_E_FAILURE;
  7907. goto fail;
  7908. }
  7909. qdf_spin_lock_bh(&soc->ast_lock);
  7910. peer->delete_in_progress = true;
  7911. dp_peer_delete_ast_entries(soc, peer);
  7912. qdf_spin_unlock_bh(&soc->ast_lock);
  7913. fail:
  7914. if (peer)
  7915. dp_peer_unref_delete(peer);
  7916. return status;
  7917. }
  7918. #endif
  7919. #ifdef ATH_SUPPORT_NAC_RSSI
  7920. /**
  7921. * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
7922. * @vdev_hdl: DP vdev handle
* @mac_addr: neighbour peer mac address
7923. * @rssi: pointer to fill with the stored rssi value
  7924. *
  7925. * Return: 0 for success. nonzero for failure.
  7926. */
  7927. static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7928. char *mac_addr, uint8_t *rssi)
{
  7929. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  7930. struct dp_pdev *pdev = vdev->pdev;
  7931. struct dp_neighbour_peer *peer = NULL;
  7932. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  7933. *rssi = 0;
  7934. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  7935. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  7936. neighbour_peer_list_elem) {
  7937. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  7938. mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
  7939. *rssi = peer->rssi;
  7940. status = QDF_STATUS_SUCCESS;
  7941. break;
  7942. }
  7943. }
  7944. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  7945. return status;
  7946. }
  7947. static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
  7948. enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
  7949. uint8_t chan_num)
  7950. {
  7951. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7952. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  7953. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7954. pdev->nac_rssi_filtering = 1;
  7955. /* Store address of NAC (neighbour peer) which will be checked
  7956. * against TA of received packets.
  7957. */
  7958. if (cmd == CDP_NAC_PARAM_ADD) {
  7959. dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
  7960. client_macaddr);
  7961. } else if (cmd == CDP_NAC_PARAM_DEL) {
  7962. dp_update_filter_neighbour_peers(vdev_handle,
  7963. DP_NAC_PARAM_DEL,
  7964. client_macaddr);
  7965. }
  7966. if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
  7967. soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
  7968. (soc->ctrl_psoc, pdev->pdev_id,
  7969. vdev->vdev_id, cmd, bssid, client_macaddr);
  7970. return QDF_STATUS_SUCCESS;
  7971. }
  7972. #endif
  7973. /**
  7974. * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
  7975. * for pktlog
7976. * @txrx_pdev_handle: cdp_pdev handle
* @mac_addr: mac address of the peer
7977. * @enb_dsb: Enable or disable peer based filtering
  7978. *
  7979. * Return: QDF_STATUS
  7980. */
  7981. static int
  7982. dp_enable_peer_based_pktlog(
  7983. struct cdp_pdev *txrx_pdev_handle,
  7984. char *mac_addr, uint8_t enb_dsb)
  7985. {
  7986. struct dp_peer *peer;
  7987. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
  7988. peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
  7989. mac_addr);
  7990. if (!peer) {
  7991. dp_err("Invalid Peer");
  7992. return QDF_STATUS_E_FAILURE;
  7993. }
  7994. peer->peer_based_pktlog_filter = enb_dsb;
  7995. pdev->dp_peer_based_pktlog = enb_dsb;
  7996. return QDF_STATUS_SUCCESS;
  7997. }
  7998. #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
  7999. /**
  8000. * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
  8001. * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
  8002. * @pdev_handle: cdp_pdev handle
  8003. * @protocol_type: protocol type for which stats should be displayed
  8004. *
  8005. * Return: none
  8006. */
  8007. static inline void
  8008. dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
  8009. uint16_t protocol_type)
  8010. {
  8011. }
  8012. #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
  8013. #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
  8014. /**
  8015. * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
  8016. * applied to the desired protocol type packets
  8017. * @txrx_pdev_handle: cdp_pdev handle
  8018. * @enable_rx_protocol_tag - bitmask that indicates what protocol types
  8019. * are enabled for tagging. zero indicates disable feature, non-zero indicates
  8020. * enable feature
  8021. * @protocol_type: new protocol type for which the tag is being added
  8022. * @tag: user configured tag for the new protocol
  8023. *
  8024. * Return: Success
  8025. */
  8026. static inline QDF_STATUS
  8027. dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
  8028. uint32_t enable_rx_protocol_tag,
  8029. uint16_t protocol_type,
  8030. uint16_t tag)
  8031. {
  8032. return QDF_STATUS_SUCCESS;
  8033. }
  8034. #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
  8035. #ifndef WLAN_SUPPORT_RX_FLOW_TAG
  8036. /**
  8037. * dp_set_rx_flow_tag - add/delete a flow
  8038. * @pdev_handle: cdp_pdev handle
  8039. * @flow_info: flow tuple that is to be added to/deleted from flow search table
  8040. *
  8041. * Return: Success
  8042. */
  8043. static inline QDF_STATUS
  8044. dp_set_rx_flow_tag(struct cdp_pdev *pdev_handle,
  8045. struct cdp_rx_flow_info *flow_info)
  8046. {
  8047. return QDF_STATUS_SUCCESS;
  8048. }
  8049. /**
  8050. * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
  8051. * given flow 5-tuple
  8052. * @pdev_handle: cdp_pdev handle
  8053. * @flow_info: flow 5-tuple for which stats should be displayed
  8054. *
  8055. * Return: Success
  8056. */
  8057. static inline QDF_STATUS
  8058. dp_dump_rx_flow_tag_stats(struct cdp_pdev *pdev_handle,
  8059. struct cdp_rx_flow_info *flow_info)
  8060. {
  8061. return QDF_STATUS_SUCCESS;
  8062. }
  8063. #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
					   uint32_t max_peers,
					   uint32_t max_ast_index,
					   bool peer_map_unmap_v2)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	soc->max_peers = max_peers;

	qdf_print("%s max_peers %u, max_ast_index: %u\n",
		  __func__, max_peers, max_ast_index);
	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);

	if (dp_peer_find_attach(soc))
		return QDF_STATUS_E_FAILURE;

	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;

	return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_set_rate_stats_cap(struct cdp_soc_t *soc_hdl,
					uint8_t val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	soc->wlanstats_enabled = val;

	return QDF_STATUS_SUCCESS;
}

static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
				      void *stats_ctx)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
}
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev = NULL;
	struct dp_peer *peer = NULL;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if (peer && !peer->bss_peer)
				dp_wdi_event_handler(
					WDI_EVENT_FLUSH_RATE_STATS_REQ,
					soc, peer->wlanstats_ctx,
					peer->peer_ids[0],
					WDI_NO_VAL, pdev_id);
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
					   uint8_t pdev_id,
					   void *buf)
{
	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
			     (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return soc->rate_stats_ctx;
}
/*
 * dp_get_cfg() - get dp cfg
 * @soc: cdp soc handle
 * @cfg: cfg enum
 *
 * Return: cfg value
 */
static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc;
	uint32_t value = 0;

	switch (cfg) {
	case cfg_dp_enable_data_stall:
		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
		break;
	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
		break;
	case cfg_dp_tso_enable:
		value = dpsoc->wlan_cfg_ctx->tso_enabled;
		break;
	case cfg_dp_lro_enable:
		value = dpsoc->wlan_cfg_ctx->lro_enabled;
		break;
	case cfg_dp_gro_enable:
		value = dpsoc->wlan_cfg_ctx->gro_enabled;
		break;
	case cfg_dp_tx_flow_start_queue_offset:
		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
		break;
	case cfg_dp_tx_flow_stop_queue_threshold:
		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
		break;
	case cfg_dp_disable_intra_bss_fwd:
		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
		break;
	default:
		value = 0;
	}

	return value;
}
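/*
 * Usage sketch (illustrative only): a caller would normally query these
 * values through the registered common op rather than calling dp_get_cfg()
 * directly. The soc handle below is hypothetical.
 */
#if 0
static bool example_is_tso_enabled(struct cdp_soc_t *cdp_soc)
{
	/* Returns the wlan_cfg value cached in the soc context */
	return !!cdp_soc->ops->cmn_drv_ops->txrx_get_cfg(cdp_soc,
							 cfg_dp_tso_enable);
}
#endif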
#ifdef PEER_FLOW_CONTROL
/**
 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
 * @soc_handle: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @param: ol ath params
 * @value: value of the flag
 * @buff: Buffer to be passed
 *
 * Implemented the same way as the legacy function, where a single
 * function is used both to display stats and to update pdev params.
 *
 * Return: 0 for success. nonzero for failure.
 */
static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
					       uint8_t pdev_id,
					       enum _ol_ath_param_t param,
					       uint32_t value, void *buff)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (qdf_unlikely(!pdev))
		return 1;

	soc = pdev->soc;
	if (!soc)
		return 1;

	switch (param) {
#ifdef QCA_ENH_V3_STATS_SUPPORT
	case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC:
		if (value)
			pdev->delay_stats_flag = true;
		else
			pdev->delay_stats_flag = false;
		break;
	case OL_ATH_PARAM_VIDEO_STATS_FC:
		qdf_print("------- TID Stats ------\n");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------\n");
		dp_pdev_print_delay_stats(pdev);
		break;
#endif
	case OL_ATH_PARAM_TOTAL_Q_SIZE:
		{
			uint32_t tx_min, tx_max;

			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

			if (!buff) {
				if ((value >= tx_min) && (value <= tx_max)) {
					pdev->num_tx_allowed = value;
				} else {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_INFO,
						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
						  tx_min, tx_max);
					break;
				}
			} else {
				*(int *)buff = pdev->num_tx_allowed;
			}
		}
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: not handled param %d ", __func__, param);
		break;
	}

	return 0;
}
#endif
/**
 * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
 * @psoc: dp soc handle
 * @pdev_id: id of DP_PDEV handle
 * @pcp: pcp value
 * @tid: tid value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
						uint8_t pdev_id,
						uint8_t pcp, uint8_t tid)
{
	struct dp_soc *soc = (struct dp_soc *)psoc;

	soc->pcp_tid_map[pcp] = tid;

	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
 * @pdev_handle: DP_PDEV handle
 * @prio: tidmap priority value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct cdp_pdev *pdev_handle,
						uint8_t prio)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_soc *soc = pdev->soc;

	soc->tidmap_prty = prio;

	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
 * @soc: DP soc handle
 * @vdev_id: id of DP_VDEV handle
 * @pcp: pcp value
 * @tid: tid value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc,
						uint8_t vdev_id,
						uint8_t pcp, uint8_t tid)
{
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev->pcp_tid_map[pcp] = tid;

	return QDF_STATUS_SUCCESS;
}
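/*
 * Illustrative sketch (not upstream code): PCP is the 3-bit 802.1Q
 * priority carried in bits 15:13 of the VLAN TCI. A hypothetical
 * classifier could resolve the TID for a tagged frame from the per-vdev
 * map programmed by dp_set_vdev_pcp_tid_map_wifi3() above.
 */
#if 0
static uint8_t example_tid_from_vlan_tci(struct dp_vdev *vdev,
					 uint16_t vlan_tci)
{
	/* PCP occupies bits 15:13 of the VLAN TCI field */
	uint8_t pcp = (vlan_tci >> 13) & 0x7;

	return vdev->pcp_tid_map[pcp];
}
#endif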
/**
 * dp_set_vdev_tidmap_tbl_id_wifi3(): update tidmap table id in vdev
 * @vdev_handle: DP_VDEV handle
 * @mapid: map_id value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_set_vdev_tidmap_tbl_id_wifi3(struct cdp_vdev *vdev_handle,
						  uint8_t mapid)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->tidmap_tbl_id = mapid;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_set_vdev_tidmap_prty_wifi3(): update tidmap priority in vdev
 * @vdev_handle: DP_VDEV handle
 * @prio: tidmap priority value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_set_vdev_tidmap_prty_wifi3(struct cdp_vdev *vdev_handle,
						uint8_t prio)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->tidmap_prty = prio;

	return QDF_STATUS_SUCCESS;
}
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added */
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
	.txrx_get_vow_config_frm_pdev = dp_get_delay_stats_flag,
	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
	.txrx_set_nac = dp_set_nac,
	.txrx_get_tx_pending = dp_get_tx_pending,
	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.tx_send = dp_tx_send,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.txrx_get_os_rx_handles_from_vdev =
		dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_pdev_tidmap_prty = dp_set_pdev_tidmap_prty_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
	.set_vdev_tidmap_prty = dp_set_vdev_tidmap_prty_wifi3,
	.set_vdev_tidmap_tbl_id = dp_set_vdev_tidmap_tbl_id_wifi3,
	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
};
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	.txrx_get_sec_type = dp_get_sec_type,
	/* TODO: Add other functions */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
		dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /* QCA_MULTIPASS_SUPPORT */
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	.txrx_update_peer_pkt_capture_params =
		dp_peer_update_pkt_capture_params,
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
};
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};

static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
	.txrx_monitor_record_channel = dp_pdev_set_monitor_channel,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.txrx_set_bsscolor = dp_mon_set_bsscolor,
};

static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.configure_rate_stats = dp_set_rate_stats_cap,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
#ifdef PEER_FLOW_CONTROL
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_cfr_filter = dp_cfr_filter,
};
#endif
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * DP is ready to runtime suspend if there are no pending TX packets.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Abort suspend due to pending TX packets"));
		return QDF_STATUS_E_AGAIN;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_flush_ring_hptp() - Update ring shadow
 *			  register HP/TP address when runtime
 *			  resume
 * @soc: DP soc context
 * @hal_srng: srng whose pending flush event should be serviced
 *
 * Return: None
 */
static
void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	if (hal_srng && hal_srng_get_clear_event(hal_srng,
						 HAL_SRNG_FLUSH_EVENT)) {
		/* Acquire the lock */
		hal_srng_access_start(soc->hal_soc, hal_srng);

		hal_srng_access_end(soc->hal_soc, hal_srng);

		hal_srng_set_flush_last_ts(hal_srng);
	}
}
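/*
 * Producer-side counterpart (illustrative sketch, assuming the
 * hal_srng_set_event() helper from the same runtime PM feature set): while
 * the bus is runtime suspended, a TX enqueue path is expected to mark the
 * ring with HAL_SRNG_FLUSH_EVENT instead of writing the HP register, so
 * that dp_flush_ring_hptp() replays the update on resume.
 */
#if 0
static void example_defer_hp_update(hal_ring_handle_t hal_srng)
{
	/* Record that this ring has a pending head-pointer update */
	hal_srng_set_event(hal_srng, HAL_SRNG_FLUSH_EVENT);
}
#endif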
/**
 * dp_runtime_resume() - resume DP for runtime PM
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Resume DP for runtime PM.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	int i;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
	}

	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);

	return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_RUNTIME_PM */
/**
 * dp_tx_get_success_ack_stats() - get tx success completion count
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev identifier
 *
 * Return: tx success ack count
 */
static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct cdp_vdev_stats *vdev_stats = NULL;
	uint32_t tx_success;
	struct dp_vdev *vdev =
		(struct dp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								     vdev_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid vdev id %d"), vdev_id);
		return 0;
	}

	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to get alloc vdev stats");
		return 0;
	}

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	dp_aggregate_vdev_stats(vdev, vdev_stats);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	tx_success = vdev_stats->tx.tx_success.num;
	qdf_mem_free(vdev_stats);

	return tx_success;
}
#ifdef WLAN_SUPPORT_DATA_STALL
/**
 * dp_register_data_stall_detect_cb() - register data stall callback
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static
QDF_STATUS dp_register_data_stall_detect_cb(
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			data_stall_detect_cb data_stall_detect_callback)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}

	pdev->data_stall_detect_callback = data_stall_detect_callback;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_deregister_data_stall_detect_cb() - de-register data stall callback
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static
QDF_STATUS dp_deregister_data_stall_detect_cb(
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			data_stall_detect_cb data_stall_detect_callback)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}

	pdev->data_stall_detect_callback = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_post_data_stall_event() - post data stall event
 * @soc_hdl: Datapath soc handle
 * @indicator: Module triggering data stall
 * @data_stall_type: data stall event type
 * @pdev_id: pdev id
 * @vdev_id_bitmap: vdev id bitmap
 * @recovery_type: data stall recovery type
 *
 * Return: None
 */
static void
dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
			      enum data_stall_log_event_indicator indicator,
			      enum data_stall_log_event_type data_stall_type,
			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
			      enum data_stall_log_recovery_type recovery_type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct data_stall_event_info data_stall_info;
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return;
	}

	if (!pdev->data_stall_detect_callback) {
		dp_err("data stall cb not registered!");
		return;
	}

	dp_info("data_stall_type: %x pdev_id: %d",
		data_stall_type, pdev_id);

	data_stall_info.indicator = indicator;
	data_stall_info.data_stall_type = data_stall_type;
	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
	data_stall_info.pdev_id = pdev_id;
	data_stall_info.recovery_type = recovery_type;

	pdev->data_stall_detect_callback(&data_stall_info);
}
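/*
 * Usage sketch (illustrative only): a hypothetical TX-completion watchdog
 * reporting a SoftAP TX timeout on pdev 0 against vdev id 0, asking for
 * the PDR recovery policy. The enum values come from the data stall event
 * definitions consumed by this API.
 */
#if 0
static void example_report_tx_stall(struct cdp_soc_t *cdp_soc)
{
	cdp_soc->ops->misc_ops->txrx_post_data_stall_event(
			cdp_soc,
			DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
			DATA_STALL_LOG_HOST_SOFTAP_TX_TIMEOUT,
			0 /* pdev_id */, 0x1 /* vdev_id_bitmap */,
			DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
}
#endif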
#endif /* WLAN_SUPPORT_DATA_STALL */
#ifdef WLAN_FEATURE_STATS_EXT
/* rx hw stats event wait timeout in ms */
#define DP_REO_STATUS_STATS_TIMEOUT 1000
/**
 * dp_txrx_ext_stats_request - request dp txrx extended stats
 * @soc_hdl: soc handle
 * @pdev_id: pdev id
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			  struct cdp_txrx_ext_stats *req)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev is null");
		return QDF_STATUS_E_INVAL;
	}

	dp_aggregate_pdev_stats(pdev);

	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
	req->rx_mpdu_error = soc->stats.rx.err_ring_pkts -
				soc->stats.rx.rx_frags;

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_hw_stats_cb - request rx hw stats response callback
 * @soc: soc handle
 * @cb_ctxt: callback context
 * @reo_status: reo command response status
 *
 * Return: None
 */
static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			      union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;

	if (soc->ignore_reo_status_cb) {
		qdf_event_set(&soc->rx_hw_stats_event);
		return;
	}

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_info("REO stats failure %d for TID %d",
			queue_status->header.status, rx_tid->tid);
		return;
	}

	soc->ext_stats.rx_mpdu_received += queue_status->mpdu_frms_cnt;
	soc->ext_stats.rx_mpdu_missed += queue_status->late_recv_mpdu_cnt;

	if (rx_tid->tid == (DP_MAX_TIDS - 1))
		qdf_event_set(&soc->rx_hw_stats_event);
}

/**
 * dp_request_rx_hw_stats - request rx hardware stats
 * @soc_hdl: soc handle
 * @vdev_id: vdev id
 *
 * Return: None
 */
static void
dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
	struct dp_peer *peer;

	if (!vdev) {
		dp_err("vdev is null");
		qdf_event_set(&soc->rx_hw_stats_event);
		return;
	}

	peer = vdev->vap_bss_peer;

	if (!peer || peer->delete_in_progress) {
		dp_err("Peer deletion in progress");
		qdf_event_set(&soc->rx_hw_stats_event);
		return;
	}

	qdf_event_reset(&soc->rx_hw_stats_event);
	dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, NULL);
}

/**
 * dp_wait_for_ext_rx_stats - wait for rx reo status for rx stats
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: status
 */
static QDF_STATUS
dp_wait_for_ext_rx_stats(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status;

	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
				       DP_REO_STATUS_STATS_TIMEOUT);

	return status;
}
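/*
 * Usage sketch (illustrative only): the expected calling sequence is to
 * kick off the per-TID REO queue stats query and then block on the
 * completion event, e.g. from a hypothetical caller:
 */
#if 0
static QDF_STATUS example_get_rx_hw_stats(struct cdp_soc_t *cdp_soc,
					  uint8_t vdev_id)
{
	/* Posts the REO queue stats command for each TID of the bss peer;
	 * dp_rx_hw_stats_cb() accumulates into soc->ext_stats and sets
	 * rx_hw_stats_event once the last TID response arrives.
	 */
	cdp_soc->ops->misc_ops->request_rx_hw_stats(cdp_soc, vdev_id);

	/* Bounded wait (DP_REO_STATUS_STATS_TIMEOUT ms) for that event */
	return cdp_soc->ops->misc_ops->wait_for_ext_rx_stats(cdp_soc);
}
#endif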
#endif /* WLAN_FEATURE_STATS_EXT */

#ifdef DP_PEER_EXTENDED_API
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif
#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.wait_for_ext_rx_stats = dp_wait_for_ext_rx_stats,
#endif
};
#endif
#ifdef DP_FLOW_CTL
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
#endif

#ifdef IPA_OFFLOAD
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
};
#endif
#ifdef DP_POWER_SAVE
static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct cdp_pdev *pdev = (struct cdp_pdev *)
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	while (dp_get_tx_pending(pdev) > 0) {
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			dp_err("TX frames are pending, abort suspend");
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	return QDF_STATUS_SUCCESS;
}

static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume
};
#endif
#ifdef DP_FLOW_CTL
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
#endif

#ifdef DP_PEER_EXTENDED_API
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};
/*
 * dp_peer_get_ref_find_by_addr - get peer with addr, incrementing ref count
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @debug_id: to track enum peer access
 *
 * Return: peer instance pointer
 */
static inline void *
dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
			     enum peer_debug_id_type debug_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	dp_info_rl("peer %pK mac: %pM", peer, peer->mac_addr.raw);

	return peer;
}

/*
 * dp_peer_release_ref - release peer ref count
 * @peer: peer handle
 * @debug_id: to track enum peer access
 *
 * Return: None
 */
static inline
void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
{
	dp_peer_unref_delete(peer);
}
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
	.peer_release_ref = dp_peer_release_ref,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
};
#endif
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef PEER_FLOW_CONTROL
	.pflow_ops = &dp_ops_pflow,
#endif /* PEER_FLOW_CONTROL */
#ifdef DP_PEER_EXTENDED_API
	.misc_ops = &dp_ops_misc,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
#ifdef DP_FLOW_CTL
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
	.throttle_ops = &dp_ops_throttle,
#endif
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
#ifdef DP_POWER_SAVE
	.bus_ops = &dp_ops_bus,
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	.cfr_ops = &dp_ops_cfr,
#endif
};
/*
 * dp_soc_set_txrx_ring_map()
 * @soc: DP handler for soc
 *
 * Return: Void
 */
void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
{
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
	}
}
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
struct cdp_soc_t *
dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		    struct hif_opaque_softc *hif_handle,
		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
		    struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);
	if (!dp_soc)
		return NULL;

	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
		return NULL;

	return dp_soc_to_cdp_soc_t(dp_soc);
}
#else
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
struct cdp_soc_t *
dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		    struct hif_opaque_softc *hif_handle,
		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
		    struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);

	return dp_soc_to_cdp_soc_t(dp_soc);
}
#endif
/**
 * dp_soc_attach() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
	      qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id)
{
	int int_ctx;
	struct dp_soc *soc = NULL;
	struct htt_soc *htt_soc;

	soc = qdf_mem_malloc(sizeof(*soc));
	if (!soc) {
		dp_err("DP SOC memory allocation failed");
		goto fail0;
	}

	int_ctx = 0;
	soc->device_id = device_id;
	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;

	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed\n");
		goto fail1;
	}

	dp_soc_set_interrupt_mode(soc);
	htt_soc = htt_soc_attach(soc, htc_handle);

	if (!htt_soc)
		goto fail1;

	soc->htt_handle = htt_soc;

	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	return soc;
fail2:
	htt_soc_detach(htt_soc);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
/**
 * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init(struct dp_soc *dpsoc, HTC_HANDLE htc_handle,
		  struct hif_opaque_softc *hif_handle)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)dpsoc;
	struct htt_soc *htt_soc = soc->htt_handle;

	htt_set_htc_handle(htt_soc, htc_handle);
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		return NULL;

	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
			   htt_get_htc_handle(htt_soc),
			   soc->hal_soc, soc->osdev);
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		break;
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		if (soc->cdp_soc.ol_ops->get_con_mode &&
		    soc->cdp_soc.ol_ops->get_con_mode() ==
		    QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
#endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 */
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->da_war_enabled = true;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		soc->da_war_enabled = false;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCN9000);
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	qdf_atomic_init(&soc->num_tx_outstanding);
	soc->num_tx_allowed =
		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL) {
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
		}

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);
	qdf_spinlock_create(&soc->ast_lock);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map */
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return soc;
}
/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @soc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (Unused)
 * @ol_ops: Offload Operations (Unused)
 * @device_id: Device ID (Unused)
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			struct hif_opaque_softc *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
}
#endif
/*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: Return pdev corresponding to MAC
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return soc->pdev_list[mac_id];

	/* Typically for MCL, as there is only 1 PDEV */
	return soc->pdev_list[0];
}
/*
 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
 * @soc: DP SoC context
 * @max_mac_rings: No of MAC rings
 *
 * Return: None
 */
static
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings)
{
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);

	*max_mac_rings = (dbs_enable) ? (*max_mac_rings) : 1;
}
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/*
 * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @enable: Enable/Disable CFR
 * @filter_val: Flag to select Filter for monitor mode
 */
static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
			  uint8_t pdev_id,
			  bool enable,
			  struct cdp_monitor_filter *filter_val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	if (pdev->monitor_vdev) {
		dp_info("No action is needed since monitor mode is enabled\n");
		return;
	}
	soc = pdev->soc;
	pdev->cfr_rcc_mode = false;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	dp_debug("Max_mac_rings %d", max_mac_rings);
	dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode);

	if (enable) {
		pdev->cfr_rcc_mode = true;

		htt_tlv_filter.ppdu_start = 1;
		htt_tlv_filter.ppdu_end = 1;
		htt_tlv_filter.ppdu_end_user_stats = 1;
		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
		htt_tlv_filter.ppdu_end_status_done = 1;
		htt_tlv_filter.mpdu_start = 1;
		htt_tlv_filter.offset_valid = false;

		htt_tlv_filter.enable_fp =
			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
		htt_tlv_filter.enable_md = 0;
		htt_tlv_filter.enable_mo =
			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
	}

	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
		int mac_for_pdev =
			dp_get_mac_id_for_pdev(mac_id,
					       pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle,
				    mac_for_pdev,
				    pdev->rxdma_mon_status_ring[mac_id]
				    .hal_srng,
				    RXDMA_MONITOR_STATUS,
				    RX_BUFFER_SIZE,
				    &htt_tlv_filter);
	}
}
#endif
/*
 * dp_is_soc_reinit() - Check if soc reinit is true
 * @soc: DP SoC context
 *
 * Return: true or false
 */
bool dp_is_soc_reinit(struct dp_soc *soc)
{
	return soc->dp_soc_reinit;
}
/*
 * dp_set_pktlog_wifi3() - enable/disable packet log for the requested
 * WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Return: 0 for success. nonzero for failure.
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	soc = pdev->soc;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.offset_valid = false;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
						mac_for_pdev,
						pdev->rxdma_mon_status_ring[mac_id]
						.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.offset_valid = false;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
						mac_for_pdev,
						pdev->rxdma_mon_status_ring[mac_id]
						.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE_PKTLOG_LITE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
						mac_for_pdev,
						pdev->rxdma_mon_status_ring[mac_id]
						.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW,
			 * a value of 0 is passed. Once these macros are
			 * defined in the htt header file, the proper macros
			 * will be used instead.
			 */
		for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id,
						       pdev->pdev_id);

			pdev->pktlog_ppdu_stats = false;
			if (!pdev->enhanced_stats_en &&
			    !pdev->tx_sniffer_enable &&
			    !pdev->mcopy_mode) {
				dp_h2t_cfg_stats_msg_send(pdev, 0,
							  mac_for_pdev);
			} else if (pdev->tx_sniffer_enable ||
				   pdev->mcopy_mode) {
				dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
			} else if (pdev->enhanced_stats_en) {
				dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
			}
		}
		break;

	default:
		/* Nothing needs to be done for other pktlog types */
		break;
	}
}
	return 0;
}
#endif

/**
 * dp_bucket_index() - Return delay bucket index for a measured delay
 *
 * @delay: delay measured in ms
 * @array: array of bucket boundaries for the delay type
 *
 * Return: bucket index
 */
static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
{
	uint8_t i = CDP_DELAY_BUCKET_0;

	/* Linear search over the bucket boundaries; stop one short of the
	 * last entry so array[i + 1] never reads past the end of the array.
	 */
	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
		if (delay >= array[i] && delay <= array[i + 1])
			return i;
	}

	/* Delay exceeded the last boundary: account it in the last bucket */
	return (CDP_DELAY_BUCKET_MAX - 1);
}
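/*
 * Example: with boundaries {0, 5, 10, 15, ...} (the interframe delay ranges
 * defined below), a measured delay of 7 ms satisfies 5 <= 7 <= 10 at i = 1,
 * so dp_bucket_index() returns bucket index 1.
 */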
/**
 * dp_fill_delay_buckets() - Fill delay statistics bucket for each
 *				type of delay
 *
 * @pdev: pdev handle
 * @delay: delay in ms
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 *
 * Return: pointer to cdp_delay_stats structure
 */
static struct cdp_delay_stats *
dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
		      uint8_t tid, uint8_t mode, uint8_t ring_id)
{
	uint8_t delay_index = 0;
	struct cdp_tid_tx_stats *tstats =
		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
	struct cdp_tid_rx_stats *rstats =
		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
	/*
	 * cdp_fw_to_hw_delay_range
	 * Fw to hw delay ranges in milliseconds
	 */
	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
	/*
	 * cdp_sw_enq_delay_range
	 * Software enqueue delay ranges in milliseconds
	 */
	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
	/*
	 * cdp_intfrm_delay_range
	 * Interframe delay ranges in milliseconds
	 */
	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};

	/*
	 * Update delay stats in proper bucket
	 */
	switch (mode) {
	/* Software Enqueue delay ranges */
	case CDP_DELAY_STATS_SW_ENQ:
		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
		tstats->swq_delay.delay_bucket[delay_index]++;
		return &tstats->swq_delay;

	/* Tx Completion delay ranges */
	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
		tstats->hwtx_delay.delay_bucket[delay_index]++;
		return &tstats->hwtx_delay;

	/* Interframe tx delay ranges */
	case CDP_DELAY_STATS_TX_INTERFRAME:
		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
		tstats->intfrm_delay.delay_bucket[delay_index]++;
		return &tstats->intfrm_delay;

	/* Interframe rx delay ranges */
	case CDP_DELAY_STATS_RX_INTERFRAME:
		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
		rstats->intfrm_delay.delay_bucket[delay_index]++;
		return &rstats->intfrm_delay;

	/* Ring reap to indication to network stack */
	case CDP_DELAY_STATS_REAP_STACK:
		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
		rstats->to_stack_delay.delay_bucket[delay_index]++;
		return &rstats->to_stack_delay;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Incorrect delay mode: %d", __func__, mode);
	}

	return NULL;
}
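/*
 * Note: dp_fill_delay_buckets() both increments the histogram bucket for the
 * given mode and returns the matching per-TID cdp_delay_stats block, so the
 * caller (dp_update_delay_stats() below) can update min/max/avg on the same
 * structure without looking it up again.
 */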
/**
 * dp_update_delay_stats() - Update delay statistics in structure
 *				and fill min, max and avg delay
 *
 * @pdev: pdev handle
 * @delay: delay in ms
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 *
 * Return: none
 */
void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
			   uint8_t tid, uint8_t mode, uint8_t ring_id)
{
	struct cdp_delay_stats *dstats = NULL;

	/*
	 * Delay ranges are different for different delay modes
	 * Get the correct index to update delay bucket
	 */
	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
	if (qdf_unlikely(!dstats))
		return;

	if (delay != 0) {
		/*
		 * Compute minimum, average and maximum delay
		 */
		if (delay < dstats->min_delay)
			dstats->min_delay = delay;

		if (delay > dstats->max_delay)
			dstats->max_delay = delay;

		/*
		 * Running average over the delays measured so far: a simple
		 * halving average of the previous average and the new sample,
		 * not a true arithmetic mean of all samples.
		 */
		if (!dstats->avg_delay)
			dstats->avg_delay = delay;
		else
			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
	}
}
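/*
 * Usage sketch (hypothetical caller, for illustration only): a tx completion
 * handler that has computed the FW-to-HW delay in ms for a frame on a given
 * ring could record it as:
 *
 *	dp_update_delay_stats(pdev, tx_delay_ms, tid,
 *			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
 *
 * where tx_delay_ms is assumed to be derived elsewhere from timestamps.
 */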