dp_main.c

/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
#endif

#include "dp_ipa.h"
#include "dp_cal_client_api.h"

#ifdef CONFIG_MCL
extern int con_mode_monitor;
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif

void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
              struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
                                  uint8_t *peer_mac_addr,
                                  struct cdp_ctrl_objmgr_peer *ctrl_peer);
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);

#define DP_INTR_POLL_TIMER_MS 10

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS 1000

/* WDS AST entry aging timer value */
#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS 120000
#define DP_WDS_AST_AGING_TIMER_CNT \
        ((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
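/*
 * Worked arithmetic (editorial note): with the defaults above, the WDS
 * aging count works out to (120000 / 1000) - 1 = 119, i.e. a WDS AST
 * entry is considered stale after 119 ticks of the 1000 ms generic
 * aging timer.
 */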
#define DP_MCS_LENGTH (6*MAX_MCS)
#define DP_NSS_LENGTH (6*SS_COUNT)
#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
#define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
#define DP_MAX_MCS_STRING_LEN 30
#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL 0xb
#define RX_RING_MASK_VAL 0x7
#else
#define TX_RING_MASK_VAL 0xF
#define RX_RING_MASK_VAL 0xF
#endif

#define STR_MAXLEN 64

#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
/* PPDU stats mask sent to FW to support the debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
/* PPDU stats mask sent to FW to support the BPR feature */
#define DP_PPDU_STATS_CFG_BPR 0x2000
/* PPDU stats mask sent to FW to support the BPR and enhanced stats features */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
                                   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support the BPR and packet log stats features */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
                                      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
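/*
 * Worked example (editorial note): the combined masks are plain bitwise
 * ORs of the individual feature bitmaps, e.g.
 * DP_PPDU_STATS_CFG_BPR_ENH = 0x2000 | 0xE67 = 0x2E67.
 */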
#define RNG_ERR "SRNG setup failed for"

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP     TID
 * 000000    0
 * 001000    1
 * 010000    2
 * 011000    3
 * 100000    4
 * 101000    5
 * 110000    6
 * 111000    7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
        0, 0, 0, 0, 0, 0, 0, 0,
        1, 1, 1, 1, 1, 1, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3,
        4, 4, 4, 4, 4, 4, 4, 4,
        5, 5, 5, 5, 5, 5, 5, 5,
        6, 6, 6, 6, 6, 6, 6, 6,
        7, 7, 7, 7, 7, 7, 7, 7,
};
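/*
 * Illustrative lookup (editorial sketch, not part of the driver): the map
 * above is indexed by the 6-bit DSCP value, so the three most significant
 * DSCP bits select the TID; e.g. DSCP 0b101000 (40) yields TID 5. The
 * helper name and the DP_MAIN_EDITORIAL_EXAMPLES guard are hypothetical,
 * and the mask assumes DSCP_TID_MAP_MAX is a power of two.
 */
#ifdef DP_MAIN_EDITORIAL_EXAMPLES
static inline uint8_t dp_example_dscp_to_tid(uint8_t dscp)
{
        /* Clamp to the table range before indexing */
        return default_dscp_tid_map[dscp & (DSCP_TID_MAP_MAX - 1)];
}
#endif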
/*
 * struct dp_rate_debug
 *
 * @mcs_type: print string for a given mcs
 * @valid: valid mcs rate?
 */
struct dp_rate_debug {
        char mcs_type[DP_MAX_MCS_STRING_LEN];
        uint8_t valid;
};

#define MCS_VALID 1
#define MCS_INVALID 0

static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
        {
                {"OFDM 48 Mbps", MCS_VALID},
                {"OFDM 24 Mbps", MCS_VALID},
                {"OFDM 12 Mbps", MCS_VALID},
                {"OFDM 6 Mbps ", MCS_VALID},
                {"OFDM 54 Mbps", MCS_VALID},
                {"OFDM 36 Mbps", MCS_VALID},
                {"OFDM 18 Mbps", MCS_VALID},
                {"OFDM 9 Mbps ", MCS_VALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_VALID},
        },
        {
                {"CCK 11 Mbps Long ", MCS_VALID},
                {"CCK 5.5 Mbps Long ", MCS_VALID},
                {"CCK 2 Mbps Long ", MCS_VALID},
                {"CCK 1 Mbps Long ", MCS_VALID},
                {"CCK 11 Mbps Short ", MCS_VALID},
                {"CCK 5.5 Mbps Short", MCS_VALID},
                {"CCK 2 Mbps Short ", MCS_VALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_VALID},
        },
        {
                {"HT MCS 0 (BPSK 1/2) ", MCS_VALID},
                {"HT MCS 1 (QPSK 1/2) ", MCS_VALID},
                {"HT MCS 2 (QPSK 3/4) ", MCS_VALID},
                {"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
                {"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
                {"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
                {"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
                {"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_INVALID},
                {"INVALID ", MCS_VALID},
        },
        {
                {"VHT MCS 0 (BPSK 1/2) ", MCS_VALID},
                {"VHT MCS 1 (QPSK 1/2) ", MCS_VALID},
                {"VHT MCS 2 (QPSK 3/4) ", MCS_VALID},
                {"VHT MCS 3 (16-QAM 1/2) ", MCS_VALID},
                {"VHT MCS 4 (16-QAM 3/4) ", MCS_VALID},
                {"VHT MCS 5 (64-QAM 2/3) ", MCS_VALID},
                {"VHT MCS 6 (64-QAM 3/4) ", MCS_VALID},
                {"VHT MCS 7 (64-QAM 5/6) ", MCS_VALID},
                {"VHT MCS 8 (256-QAM 3/4) ", MCS_VALID},
                {"VHT MCS 9 (256-QAM 5/6) ", MCS_VALID},
                {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
                {"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
                {"INVALID ", MCS_VALID},
        },
        {
                {"HE MCS 0 (BPSK 1/2) ", MCS_VALID},
                {"HE MCS 1 (QPSK 1/2) ", MCS_VALID},
                {"HE MCS 2 (QPSK 3/4) ", MCS_VALID},
                {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID},
                {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID},
                {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID},
                {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID},
                {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID},
                {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID},
                {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID},
                {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
                {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
                {"INVALID ", MCS_VALID},
        }
};
/**
 * enum dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only the first radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only the second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
        DP_NSS_DEFAULT_MAP,
        DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
        DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
        DP_NSS_DBDC_OFFLOADED_MAP,
        DP_NSS_DBTC_OFFLOADED_MAP,
        DP_NSS_CPU_RING_MAP_MAX
};

/**
 * @brief CPU to tx ring map
 */
#ifdef CONFIG_WIN
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
        {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
        {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
        {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
        {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
        {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#else
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
        {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
        {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
        {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
        {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
        {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#endif
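/*
 * Illustrative lookup (editorial sketch, not part of the driver): the TX
 * ring for a given interrupt context is chosen by indexing the map with
 * the NSS offload mode and the context id; e.g. with DP_NSS_DEFAULT_MAP,
 * context 1 maps to ring 0x1. The helper name and the
 * DP_MAIN_EDITORIAL_EXAMPLES guard are hypothetical.
 */
#ifdef DP_MAIN_EDITORIAL_EXAMPLES
static inline uint8_t dp_example_tx_ring_for_ctx(int ctx)
{
        return dp_cpu_ring_map[DP_NSS_DEFAULT_MAP]
                              [ctx % WLAN_CFG_INT_NUM_CONTEXTS];
}
#endif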
/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
        STATS_FW = 0,
        STATS_HOST = 1,
        STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 */
enum dp_fw_stats {
        TXRX_FW_STATS_INVALID = -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
        {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
        {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
        {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
        /* Last ENUM for HTT FW STATS */
        {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
        {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
        {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
        {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
};
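/*
 * Illustrative use of the mapping table (editorial sketch, not part of
 * the driver): each row pairs a firmware stats id with a host stats id,
 * and the *_INVALID markers flag the side that does not service the
 * request. The helper below assumes the caller passes a valid row index;
 * its name and the DP_MAIN_EDITORIAL_EXAMPLES guard are hypothetical.
 */
#ifdef DP_MAIN_EDITORIAL_EXAMPLES
static inline bool dp_example_stats_handled_by_fw(int stats_row)
{
        return dp_stats_mapping_table[stats_row][STATS_FW] !=
               TXRX_FW_STATS_INVALID;
}
#endif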
/* MCL specific functions */
#ifdef CONFIG_MCL

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0 because, in interrupt mode (softirq-based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, when packet log is enabled for SAP/STA/P2P modes, regular interrupt
 * processing will not process monitor mode rings; that is done in a
 * separate timer context instead.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
        return 0;
}

/*
 * dp_service_mon_rings() - timer handler to reap monitor rings, required
 * because we do not get ppdu end interrupts
 * @arg: SoC handle
 *
 * Return: none
 */
static void dp_service_mon_rings(void *arg)
{
        struct dp_soc *soc = (struct dp_soc *)arg;
        int ring = 0, work_done, mac_id;
        struct dp_pdev *pdev = NULL;

        for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
                pdev = soc->pdev_list[ring];
                if (!pdev)
                        continue;
                for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                        int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
                                                                  pdev->pdev_id);

                        work_done = dp_mon_process(soc, mac_for_pdev,
                                                   QCA_NAPI_BUDGET);

                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                                  FL("Reaped %d descs from Monitor rings"),
                                  work_done);
                }
        }

        qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
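/*
 * Illustrative timer wiring (editorial sketch, not part of the driver):
 * dp_service_mon_rings() re-arms soc->mon_reap_timer itself, so one
 * initial qdf_timer_mod() keeps the monitor rings polled every
 * DP_INTR_POLL_TIMER_MS. This assumes the timer was initialized elsewhere
 * with dp_service_mon_rings as its callback; the helper name and the
 * DP_MAIN_EDITORIAL_EXAMPLES guard are hypothetical.
 */
#ifdef DP_MAIN_EDITORIAL_EXAMPLES
static inline void dp_example_start_mon_reap(struct dp_soc *soc)
{
        qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
#endif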
#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @ppdev: physical device handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
        struct dp_pdev *handle = (struct dp_pdev *)ppdev;

        if (handle->pkt_log_init) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s: Packet log already initialized", __func__);
                return;
        }

        pktlog_sethandle(&handle->pl_dev, scn);
        pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);

        if (pktlogmod_init(scn)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s: pktlogmod_init failed", __func__);
                handle->pkt_log_init = false;
        } else {
                handle->pkt_log_init = true;
        }
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
        struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

        dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
        pktlog_htc_attach();
}

/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
        int i;
        int num_rx_contexts = 0;
        struct dp_soc *soc = (struct dp_soc *)soc_hdl;

        for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
                if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
                        num_rx_contexts++;

        return num_rx_contexts;
}

/**
 * dp_pktlogmod_exit() - API to clean up pktlog info
 * @handle: pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
        void *scn = (void *)handle->soc->hif_handle;

        if (!scn) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s: Invalid hif(scn) handle", __func__);
                return;
        }

        pktlogmod_exit(scn);
        handle->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
        return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#endif
/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * Return: pointer to dp_vdev
 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
        return (struct dp_vdev *)cdp_opaque_vdev;
}

static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                 struct cdp_peer *peer_hdl,
                                 uint8_t *mac_addr,
                                 enum cdp_txrx_ast_entry_type type,
                                 uint32_t flags)
{
        return dp_peer_add_ast((struct dp_soc *)soc_hdl,
                               (struct dp_peer *)peer_hdl,
                               mac_addr,
                               type,
                               flags);
}

static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                  void *ast_entry_hdl)
{
        struct dp_soc *soc = (struct dp_soc *)soc_hdl;

        qdf_spin_lock_bh(&soc->ast_lock);
        dp_peer_del_ast((struct dp_soc *)soc_hdl,
                        (struct dp_ast_entry *)ast_entry_hdl);
        qdf_spin_unlock_bh(&soc->ast_lock);
}

static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                    struct cdp_peer *peer_hdl,
                                    uint8_t *wds_macaddr,
                                    uint32_t flags)
{
        int status = -1;
        struct dp_soc *soc = (struct dp_soc *)soc_hdl;
        struct dp_ast_entry *ast_entry = NULL;
        struct dp_peer *peer = (struct dp_peer *)peer_hdl;

        qdf_spin_lock_bh(&soc->ast_lock);
        ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
                                                    peer->vdev->pdev->pdev_id);

        if (ast_entry) {
                status = dp_peer_update_ast(soc,
                                            peer,
                                            ast_entry, flags);
        }
        qdf_spin_unlock_bh(&soc->ast_lock);

        return status;
}
/*
 * dp_wds_reset_ast_wifi3() - Reset the is_active param for an AST entry
 * @soc_hdl: Datapath SOC handle
 * @wds_macaddr: WDS entry MAC Address
 * @vdev_handle: vdev handle used to derive the pdev id for the lookup
 *
 * Return: None
 */
static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
				   uint8_t *wds_macaddr, void *vdev_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    vdev->pdev->pdev_id);

	if (ast_entry) {
		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
		    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
			ast_entry->is_active = TRUE;
		}
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
/*
 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all AST
 * entries
 * @soc_hdl: Datapath SOC handle
 * @vdev_hdl: vdev handle (unused)
 *
 * Return: None
 */
static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
					 void *vdev_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
					     CDP_TXRX_AST_TYPE_STATIC) ||
					    (ase->type ==
					     CDP_TXRX_AST_TYPE_SELF) ||
					    (ase->type ==
					     CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					ase->is_active = TRUE;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
/*
 * dp_wds_flush_ast_table_wifi3() - Delete all WDS and HMWDS AST entries
 * @soc_hdl: Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
					     CDP_TXRX_AST_TYPE_STATIC) ||
					    (ase->type ==
					     CDP_TXRX_AST_TYPE_SELF) ||
					    (ase->type ==
					     CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
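/**
 * dp_peer_ast_hash_find_soc_wifi3() - look up an AST entry by MAC address
 * across the whole soc
 * @soc_hdl: opaque cdp soc handle
 * @ast_mac_addr: MAC address to search for
 *
 * Return: opaque pointer to the matching AST entry, or NULL if not found
 */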
static void *dp_peer_ast_hash_find_soc_wifi3(struct cdp_soc_t *soc_hdl,
					     uint8_t *ast_mac_addr)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return (void *)ast_entry;
}
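/**
 * dp_peer_ast_hash_find_by_pdevid_wifi3() - look up an AST entry by MAC
 * address within a specific pdev
 * @soc_hdl: opaque cdp soc handle
 * @ast_mac_addr: MAC address to search for
 * @pdev_id: pdev id to scope the search
 *
 * Return: opaque pointer to the matching AST entry, or NULL if not found
 */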
static void *dp_peer_ast_hash_find_by_pdevid_wifi3(struct cdp_soc_t *soc_hdl,
						   uint8_t *ast_mac_addr,
						   uint8_t pdev_id)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return (void *)ast_entry;
}
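/**
 * dp_peer_ast_get_pdev_id_wifi3() - get the pdev id of an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: pdev id associated with the AST entry
 */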
static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
					     void *ast_entry_hdl)
{
	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
				       (struct dp_ast_entry *)ast_entry_hdl);
}
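/**
 * dp_peer_ast_get_next_hop_wifi3() - get the next_hop flag of an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: next_hop value of the AST entry
 */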
static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
					      void *ast_entry_hdl)
{
	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
					(struct dp_ast_entry *)ast_entry_hdl);
}
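/**
 * dp_peer_ast_set_type_wifi3() - set the type of an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 * @type: AST entry type to set
 *
 * Return: none
 */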
static void dp_peer_ast_set_type_wifi3(
				struct cdp_soc_t *soc_hdl,
				void *ast_entry_hdl,
				enum cdp_txrx_ast_entry_type type)
{
	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
			     (struct dp_ast_entry *)ast_entry_hdl,
			     type);
}
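/**
 * dp_peer_ast_get_type_wifi3() - get the type of an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: type of the AST entry
 */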
static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
					struct cdp_soc_t *soc_hdl,
					void *ast_entry_hdl)
{
	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
}
#if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
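/**
 * dp_peer_ast_set_cp_ctx_wifi3() - attach a control-plane context to an
 * AST entry
 * @soc_handle: opaque cdp soc handle
 * @ast_entry: opaque AST entry handle
 * @cp_ctx: control-plane context to store in the entry
 *
 * Return: none
 */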
void dp_peer_ast_set_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
				  void *ast_entry,
				  void *cp_ctx)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_ast_set_cp_ctx(soc,
			       (struct dp_ast_entry *)ast_entry, cp_ctx);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
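/**
 * dp_peer_ast_get_cp_ctx_wifi3() - retrieve the control-plane context
 * stored in an AST entry
 * @soc_handle: opaque cdp soc handle
 * @ast_entry: opaque AST entry handle
 *
 * Return: the stored control-plane context, or NULL
 */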
void *dp_peer_ast_get_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
				   void *ast_entry)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	void *cp_ctx = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	cp_ctx = dp_peer_ast_get_cp_ctx(soc,
					(struct dp_ast_entry *)ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return cp_ctx;
}
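/**
 * dp_peer_ast_get_wmi_sent_wifi3() - check whether a delete command has
 * already been sent over WMI for this AST entry
 * @soc_handle: opaque cdp soc handle
 * @ast_entry: opaque AST entry handle
 *
 * Return: true if the delete command was sent, false otherwise
 */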
bool dp_peer_ast_get_wmi_sent_wifi3(struct cdp_soc_t *soc_handle,
				    void *ast_entry)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	bool wmi_sent = false;

	qdf_spin_lock_bh(&soc->ast_lock);
	wmi_sent = dp_peer_ast_get_del_cmd_sent(soc,
						(struct dp_ast_entry *)
						ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return wmi_sent;
}
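/**
 * dp_peer_ast_free_entry_wifi3() - free an AST entry under soc->ast_lock
 * @soc_handle: opaque cdp soc handle
 * @ast_entry: opaque AST entry handle
 *
 * Return: none
 */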
void dp_peer_ast_free_entry_wifi3(struct cdp_soc_t *soc_handle,
				  void *ast_entry)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_ast_free_entry(soc, (struct dp_ast_entry *)ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif
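/**
 * dp_peer_ast_get_peer_wifi3() - get the peer that owns an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: opaque cdp peer handle of the owning peer
 */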
static struct cdp_peer *dp_peer_ast_get_peer_wifi3(
				struct cdp_soc_t *soc_hdl,
				void *ast_entry_hdl)
{
	return (struct cdp_peer *)((struct dp_ast_entry *)ast_entry_hdl)->peer;
}
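/**
 * dp_peer_ast_get_nexhop_peer_id_wifi3() - get the peer id of the peer
 * that owns an AST entry
 * @soc_hdl: opaque cdp soc handle
 * @ast_entry_hdl: opaque AST entry handle
 *
 * Return: first peer id of the peer that owns the AST entry
 */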
static uint32_t dp_peer_ast_get_nexhop_peer_id_wifi3(
				struct cdp_soc_t *soc_hdl,
				void *ast_entry_hdl)
{
	return ((struct dp_ast_entry *)ast_entry_hdl)->peer->peer_ids[0];
}
/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields correspond
 * to ring numbers. We are finding which interrupt group a ring belongs to.
 *
 * Return: the index in the grp_mask array in which the ring number is set;
 * -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
{
	int ext_group_num;
	int mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -QDF_STATUS_E_NOENT;
}
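/**
 * dp_srng_calculate_msi_group() - map a (ring_type, ring_num) pair to the
 * interrupt group servicing it
 * @soc: DP SOC handle
 * @ring_type: type of the ring
 * @ring_num: ring number within that type
 *
 * Selects the configured group mask for the ring type (treating WBM2SW
 * ring 3 as the separate rx_wbm_rel ring) and searches it for the ring.
 *
 * Return: group index on success; -QDF_STATUS_E_NOENT for rings that are
 * not part of any ext_group
 */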
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
		break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
		break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
		break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
		break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
		break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
		break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
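/**
 * dp_srng_msi_setup() - populate the MSI address/data for a ring
 * @soc: DP SOC handle
 * @ring_params: HAL ring params to fill in
 * @ring_type: type of the ring
 * @ring_num: ring number
 *
 * Queries the platform for the MSI assignment of the "DP" block, maps the
 * ring to its interrupt group, and programs the corresponding MSI address
 * and data into @ring_params. Rings without an ext_group are left with
 * MSI disabled.
 */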
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			  FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
			  ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			  FL("2 msi_groups will share an msi; msi_group_num %d"),
			  msi_group_number);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Return: none
 */
#ifdef FEATURE_AST
static void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
		"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
		"DA", "HMWDS_SEC"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
	DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("AST Table:");

	qdf_spin_lock_bh(&soc->ast_lock);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
					DP_PRINT_STATS("%6d mac_addr = %pM"
						       " peer_mac_addr = %pM"
						       " type = %s"
						       " next_hop = %d"
						       " is_active = %d"
						       " is_bss = %d"
						       " ast_idx = %d"
						       " ast_hash = %d"
						       " pdev_id = %d"
						       " vdev_id = %d"
						       " del_cmd_sent = %d",
						       ++num_entries,
						       ase->mac_addr.raw,
						       ase->peer->mac_addr.raw,
						       type[ase->type],
						       ase->next_hop,
						       ase->is_active,
						       ase->is_bss,
						       ase->ast_idx,
						       ase->ast_hash_value,
						       ase->pdev_id,
						       ase->vdev_id,
						       ase->del_cmd_sent);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
static void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
	return;
}
#endif
/**
 * dp_print_peer_table() - Dump all Peer stats
 * @vdev: Datapath Vdev handle
 *
 * Return: none
 */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;

	DP_PRINT_STATS("Dumping Peer Table Stats:");
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer) {
			DP_PRINT_STATS("Invalid Peer");
			return;
		}
		DP_PRINT_STATS(" peer_mac_addr = %pM nawds_enabled = %d",
			       peer->mac_addr.raw,
			       peer->nawds_enabled);
		DP_PRINT_STATS(" bss_peer = %d wapi = %d wds_enabled = %d",
			       peer->bss_peer,
			       peer->wapi,
			       peer->wds_enabled);
		DP_PRINT_STATS(" delete in progress = %d peer id = %d",
			       peer->delete_in_progress,
			       peer->peer_ids[0]);
	}
}
/*
 * dp_srng_setup() - Internal function to setup SRNG rings used by data path
 */
static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
			 int ring_type, int ring_num, int mac_id,
			 uint32_t num_entries)
{
	void *hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	/* TODO: See if we should get align size from hal */
	uint32_t ring_base_align = 8;
	struct hal_srng_params ring_params;
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	/* TODO: Currently hal layer takes care of endianness related settings.
	 * See if these settings need to passed from DP layer
	 */
	ring_params.flags = 0;

	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
	srng->hal_srng = NULL;
	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
	srng->num_entries = num_entries;

	if (!soc->dp_soc_reinit) {
		srng->base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev,
						 soc->osdev->dev,
						 srng->alloc_size,
						 &srng->base_paddr_unaligned);
	}

	if (!srng->base_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("alloc failed - ring_type: %d, ring_num %d"),
			  ring_type, ring_num);
		return QDF_STATUS_E_NOMEM;
	}

	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
		((unsigned long)(ring_params.ring_base_vaddr) -
		 (unsigned long)srng->base_vaddr_unaligned);
	ring_params.num_entries = num_entries;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
		  ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
		  (void *)ring_params.ring_base_paddr,
		  ring_params.num_entries);

	if (soc->intr_mode == DP_INTR_MSI) {
		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Using MSI for ring_type: %d, ring_num %d"),
			  ring_type, ring_num);
	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
			  ring_type, ring_num);
	}

	/*
	 * Setup interrupt timer and batch counter thresholds for
	 * interrupt mitigation based on ring type
	 */
	if (ring_type == REO_DST) {
		ring_params.intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params.intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params.intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params.intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params.intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params.intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params.low_threshold = num_entries >> 3;
		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params.intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params.intr_batch_cntr_thres_entries = 0;
	}

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
					mac_id, &ring_params);

	if (!srng->hal_srng) {
		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
					srng->alloc_size,
					srng->base_vaddr_unaligned,
					srng->base_paddr_unaligned, 0);
		/* Propagate the failure so callers do not treat a NULL
		 * hal_srng as a successfully initialized ring.
		 */
		return QDF_STATUS_E_FAILURE;
	}

	return 0;
}
/*
 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
 * @soc: DP SOC handle
 * @srng: source ring structure
 * @ring_type: type of ring
 * @ring_num: ring number
 *
 * Return: None
 */
static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
			   int ring_type, int ring_num)
{
}
/**
 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
 * Any buffers allocated and attached to ring entries are expected to be freed
 * before calling this function.
 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Ring type: %d, num:%d not setup"),
			  ring_type, ring_num);
		return;
	}

	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);

	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
	srng->hal_srng = NULL;
}
/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(void *hif_handle);

/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP interrupt context (struct dp_intr) for this handler instance
 * @budget: Number of frames/descriptors that can be processed in one shot
 *
 * Return: remaining budget/quota for the soc device
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;
	struct dp_pdev *pdev = NULL;
	int mac_id;

	/* Process Tx completion interrupts first to return back buffers */
	while (tx_mask) {
		if (tx_mask & 0x1) {
			work_done = dp_tx_comp_handler(soc,
					soc->tx_comp_ring[ring].hal_srng,
					remaining_quota);

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "tx mask 0x%x ring %d, budget %d, work_done %d",
				  tx_mask, ring, budget, work_done);

			budget -= work_done;
			if (budget <= 0)
				goto budget_done;

			remaining_quota = budget;
		}
		tx_mask = tx_mask >> 1;
		ring++;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(soc,
				soc->reo_exception_ring.hal_srng,
				remaining_quota);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "REO Exception Ring: work_done %d budget %d",
			  work_done, budget);

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(soc,
				soc->rx_rel_ring.hal_srng, remaining_quota);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "WBM Release Ring: work_done %d budget %d",
			  work_done, budget);

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (rx_mask & (1 << ring)) {
				work_done = dp_rx_process(int_ctx,
					    soc->reo_dest_ring[ring].hal_srng,
					    ring,
					    remaining_quota);

				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					  "rx mask 0x%x ring %d, work_done %d budget %d",
					  rx_mask, ring, work_done, budget);

				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask)
		dp_reo_status_ring_handler(soc);

	/* Process LMAC interrupts */
	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (pdev == NULL)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								  pdev->pdev_id);

			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
				work_done = dp_mon_process(soc, mac_for_pdev,
							   remaining_quota);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			if (int_ctx->rxdma2host_ring_mask &
			    (1 << mac_for_pdev)) {
				work_done = dp_rxdma_err_process(soc,
								 mac_for_pdev,
								 remaining_quota);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			if (int_ctx->host2rxdma_ring_mask &
			    (1 << mac_for_pdev)) {
				union dp_rx_desc_list_elem_t *desc_list = NULL;
				union dp_rx_desc_list_elem_t *tail = NULL;
				struct dp_srng *rx_refill_buf_ring =
					&pdev->rx_refill_buf_ring;

				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
					     1);
				dp_rx_buffers_replenish(soc, mac_for_pdev,
					rx_refill_buf_ring,
					&soc->rx_desc_buf[mac_for_pdev], 0,
					&desc_list, &tail);
			}
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);

budget_done:
	return dp_budget - budget;
}
/*
 * dp_interrupt_timer() - timer poll for interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int i;

	if (qdf_atomic_read(&soc->cmn_init_done)) {
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_service_srngs(&soc->intr_ctx[i], 0xffff);

		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}
}
/*
 * dp_soc_attach_poll() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler. In poll mode no real interrupts are taken; a timer periodically
 * invokes dp_service_srngs() for each context instead.
 *
 * Return: 0 for success, nonzero for failure.
 */
static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
		       dp_interrupt_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);

#if defined(CONFIG_MCL)
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: Poll mode", __func__);
		return dp_soc_attach_poll(txrx_soc);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: Interrupt mode", __func__);
		return dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	return dp_soc_attach_poll(txrx_soc);
}
#else
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (hif_is_polled_mode_enabled(soc->hif_handle))
		return dp_soc_attach_poll(txrx_soc);
	else
		return dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif
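/**
 * dp_soc_interrupt_map_calculate_integrated() - build the IRQ id map for one
 * interrupt context when running with integrated (non-MSI) interrupts
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids serviced by this context
 * @num_irq_r: output count of entries written to @irq_id_map
 *
 * Walks every ring mask configured for this context and translates each set
 * bit into the corresponding hardware IRQ id.
 */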
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;
	}
	*num_irq_r = num_irq;
}
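/**
 * dp_soc_interrupt_map_calculate_msi() - build the IRQ id map for one
 * interrupt context when MSIs are available
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids (at most one MSI per context)
 * @num_irq_r: output count of entries written to @irq_id_map
 * @msi_vector_count: number of MSI vectors assigned to the "DP" block
 * @msi_vector_start: first MSI vector number assigned to the "DP" block
 *
 * Contexts are distributed round-robin over the available MSI vectors, so
 * multiple contexts may share a vector when there are fewer vectors than
 * contexts.
 */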
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
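/**
 * dp_soc_interrupt_map_calculate() - build the IRQ id map for one interrupt
 * context
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids
 * @num_irq: output count of entries written to @irq_id_map
 *
 * Uses the MSI variant when the platform reports an MSI assignment for the
 * "DP" block and falls back to the integrated variant otherwise.
 */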
static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
					   int *irq_id_map, int *num_irq)
{
	int msi_vector_count, ret;
	uint32_t msi_base_data, msi_vector_start;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_vector_count,
					  &msi_base_data,
					  &msi_vector_start);
	if (ret)
		return dp_soc_interrupt_map_calculate_integrated(soc,
				intr_ctx_num, irq_id_map, num_irq);

	else
		dp_soc_interrupt_map_calculate_msi(soc,
				intr_ctx_num, irq_id_map, num_irq,
				msi_vector_count, msi_vector_start);
}
/*
 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i = 0;
	int num_irq = 0;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		int ret = 0;

		/* Map of IRQ ids registered with one interrupt context */
		int irq_id_map[HIF_MAX_GRP_IRQ];

		int tx_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mon_mask =
			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
		int rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		int reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		int rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_ring_mask =
			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_mon_ring_mask =
			wlan_cfg_get_host2rxdma_mon_ring_mask(
				soc->wlan_cfg_ctx, i);

		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask = tx_mask;
		soc->intr_ctx[i].rx_ring_mask = rx_mask;
		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
			host2rxdma_mon_ring_mask;
		soc->intr_ctx[i].soc = soc;

		num_irq = 0;

		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
					       &num_irq);

		ret = hif_register_ext_group(soc->hif_handle,
				num_irq, irq_id_map, dp_service_srngs,
				&soc->intr_ctx[i], "dp_intr",
				HIF_EXEC_NAPI_TYPE,
				QCA_NAPI_DEF_SCALE_BIN_SHIFT);

		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("failed, ret = %d"), ret);
			return QDF_STATUS_E_FAILURE;
		}
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	hif_configure_ext_group_interrupts(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}
/*
 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
 * @txrx_soc: DP SOC handle
 *
 * Return: void
 */
static void dp_soc_interrupt_detach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	if (soc->intr_mode == DP_INTR_POLL) {
		qdf_timer_stop(&soc->int_timer);
		qdf_timer_free(&soc->int_timer);
	} else {
		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].tx_ring_mask = 0;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;

		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
	}
}
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
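/*
 * Illustration of the pool sizing done in dp_hw_link_desc_pool_setup()
 * below, using assumed values (the real per-descriptor capacities come
 * from the HAL and are chip specific). Assuming, for example,
 * max_clients = 64 and hal_num_mpdus_per_link_desc() = 6:
 *
 *   num_mpdu_link_descs = (64 * AVG_TIDS_PER_CLIENT *
 *                          AVG_MAX_MPDUS_PER_TID) / 6
 *                       = (64 * 2 * 128) / 6 = 2730
 *
 * The other descriptor counts are computed the same way, summed, and the
 * total is rounded up to the next power of 2 before being carved into
 * banks of at most wlan_cfg_max_alloc_size() bytes.
 */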
/*
 * Allocate and setup link descriptor pool that will be used by HW for
 * various link and queue descriptors and managed by WBM
 */
static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
{
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc =
		hal_num_mpdus_per_link_desc(soc->hal_soc);
	uint32_t num_msdus_per_link_desc =
		hal_num_msdus_per_link_desc(soc->hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_link_desc_banks;
	uint32_t last_bank_size = 0;
	uint32_t entry_size, num_entries;
	int i;
	uint32_t desc_id = 0;
	qdf_dma_addr_t *baseaddr = NULL;

	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors are not included here
	 * because they (REO queue extension descriptors) are expected to
	 * be allocated contiguously with REO queue descriptors.
	 */
	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

	num_mpdu_queue_descs = num_mpdu_link_descs /
		num_mpdu_links_per_queue_desc;

	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
		num_msdus_per_link_desc;

	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
		num_tx_msdu_link_descs + num_rx_msdu_link_descs;

	/* Round up to power of 2 */
	total_link_descs = 1;
	while (total_link_descs < num_entries)
		total_link_descs <<= 1;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("total_link_descs: %u, link_desc_size: %d"),
		  total_link_descs, link_desc_size);
	total_mem_size = total_link_descs * link_desc_size;

	total_mem_size += link_desc_align;

	if (total_mem_size <= max_alloc_size) {
		num_link_desc_banks = 0;
		last_bank_size = total_mem_size;
	} else {
		num_link_desc_banks = (total_mem_size) /
			(max_alloc_size - link_desc_align);
		last_bank_size = total_mem_size %
			(max_alloc_size - link_desc_align);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("total_mem_size: %d, num_link_desc_banks: %u"),
		  total_mem_size, num_link_desc_banks);

	for (i = 0; i < num_link_desc_banks; i++) {
		if (!soc->dp_soc_reinit) {
			baseaddr = &soc->link_desc_banks[i].
					base_paddr_unaligned;
			soc->link_desc_banks[i].base_vaddr_unaligned =
				qdf_mem_alloc_consistent(soc->osdev,
							 soc->osdev->dev,
							 max_alloc_size,
							 baseaddr);
		}
		soc->link_desc_banks[i].size = max_alloc_size;

		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) +
			((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) %
			link_desc_align));

		soc->link_desc_banks[i].base_paddr = (unsigned long)(
			soc->link_desc_banks[i].base_paddr_unaligned) +
			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
			(unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned));

		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Link descriptor memory alloc failed"));
			goto fail;
		}
	}

	if (last_bank_size) {
		/* Allocate last bank in case total memory required is not
		 * an exact multiple of max_alloc_size
		 */
		if (!soc->dp_soc_reinit) {
			baseaddr = &soc->link_desc_banks[i].
					base_paddr_unaligned;
			soc->link_desc_banks[i].base_vaddr_unaligned =
				qdf_mem_alloc_consistent(soc->osdev,
							 soc->osdev->dev,
							 last_bank_size,
							 baseaddr);
		}
		soc->link_desc_banks[i].size = last_bank_size;

		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
			(soc->link_desc_banks[i].base_vaddr_unaligned) +
			((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) %
			link_desc_align));

		soc->link_desc_banks[i].base_paddr =
			(unsigned long)(
			soc->link_desc_banks[i].base_paddr_unaligned) +
			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
			(unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned));
	}

	/* Allocate and setup link descriptor idle list for HW internal use */
	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
	total_mem_size = entry_size * total_link_descs;

	if (total_mem_size <= max_alloc_size) {
		void *desc;

		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
				  WBM_IDLE_LINK, 0, 0, total_link_descs)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Link desc idle ring setup failed"));
			goto fail;
		}

		hal_srng_access_start_unlocked(soc->hal_soc,
					soc->wbm_idle_link_ring.hal_srng);

		for (i = 0; i < MAX_LINK_DESC_BANKS &&
			soc->link_desc_banks[i].base_paddr; i++) {
			uint32_t num_entries = (soc->link_desc_banks[i].size -
				((unsigned long)(
				soc->link_desc_banks[i].base_vaddr) -
				(unsigned long)(
				soc->link_desc_banks[i].base_vaddr_unaligned)))
				/ link_desc_size;
			unsigned long paddr = (unsigned long)(
				soc->link_desc_banks[i].base_paddr);

			while (num_entries && (desc = hal_srng_src_get_next(
					soc->hal_soc,
					soc->wbm_idle_link_ring.hal_srng))) {
				hal_set_link_desc_addr(desc,
					LINK_DESC_COOKIE(desc_id, i), paddr);
				num_entries--;
				desc_id++;
				paddr += link_desc_size;
			}
		}
		hal_srng_access_end_unlocked(soc->hal_soc,
					soc->wbm_idle_link_ring.hal_srng);
	} else {
		uint32_t num_scatter_bufs;
		uint32_t num_entries_per_buf;
		uint32_t rem_entries;
		uint8_t *scatter_buf_ptr;
		uint16_t scatter_buf_num;
		uint32_t buf_size = 0;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
			soc->hal_soc, total_mem_size,
			soc->wbm_idle_scatter_buf_size);

		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("scatter bufs size out of bounds"));
			goto fail;
		}

		for (i = 0; i < num_scatter_bufs; i++) {
			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
			if (!soc->dp_soc_reinit) {
				buf_size = soc->wbm_idle_scatter_buf_size;
				soc->wbm_idle_scatter_buf_base_vaddr[i] =
					qdf_mem_alloc_consistent(soc->osdev,
								 soc->osdev->
								 dev,
								 buf_size,
								 baseaddr);
			}
			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Scatter lst memory alloc fail"));
				goto fail;
			}
		}

		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;

		for (i = 0; i < MAX_LINK_DESC_BANKS &&
			soc->link_desc_banks[i].base_paddr; i++) {
			uint32_t num_link_descs =
				(soc->link_desc_banks[i].size -
				((unsigned long)(
				soc->link_desc_banks[i].base_vaddr) -
				(unsigned long)(
				soc->link_desc_banks[i].base_vaddr_unaligned)))
				/ link_desc_size;
			unsigned long paddr = (unsigned long)(
				soc->link_desc_banks[i].base_paddr);

			while (num_link_descs) {
				hal_set_link_desc_addr((void *)scatter_buf_ptr,
					LINK_DESC_COOKIE(desc_id, i), paddr);
				num_link_descs--;
				desc_id++;
				paddr += link_desc_size;
				rem_entries--;
				if (rem_entries) {
					scatter_buf_ptr += entry_size;
				} else {
					rem_entries = num_entries_per_buf;
					scatter_buf_num++;
					if (scatter_buf_num >= num_scatter_bufs)
						break;
					scatter_buf_ptr = (uint8_t *)(
						soc->wbm_idle_scatter_buf_base_vaddr[
						scatter_buf_num]);
				}
			}
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num - 1])), total_link_descs);
	}
	return 0;

fail:
	if (soc->wbm_idle_link_ring.hal_srng) {
		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
				WBM_IDLE_LINK, 0);
	}

	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->wbm_idle_scatter_buf_size,
				soc->wbm_idle_scatter_buf_base_vaddr[i],
				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
		}
	}

	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->link_desc_banks[i].size,
				soc->link_desc_banks[i].base_vaddr_unaligned,
				soc->link_desc_banks[i].base_paddr_unaligned,
				0);
			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
		}
	}
	return QDF_STATUS_E_FAILURE;
}
/*
 * Free link descriptor pool that was set up for HW use
 */
static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
{
	int i;

	if (soc->wbm_idle_link_ring.hal_srng) {
		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
				WBM_IDLE_LINK, 0);
	}

	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->wbm_idle_scatter_buf_size,
				soc->wbm_idle_scatter_buf_base_vaddr[i],
				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
		}
	}

	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->link_desc_banks[i].size,
				soc->link_desc_banks[i].base_vaddr_unaligned,
				soc->link_desc_banks[i].base_paddr_unaligned,
				0);
			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
		}
	}
}
#ifdef IPA_OFFLOAD
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef QCA_WIFI_QCA8074_VP
#define REO_DST_RING_SIZE_QCA8074 1023
#else
#define REO_DST_RING_SIZE_QCA8074 8
#endif /* QCA_WIFI_QCA8074_VP */
#else
#define REO_DST_RING_SIZE_QCA6290 1024
#ifndef QCA_WIFI_QCA8074_VP
#define REO_DST_RING_SIZE_QCA8074 2048
#else
#define REO_DST_RING_SIZE_QCA8074 8
#endif /* QCA_WIFI_QCA8074_VP */
#endif /* IPA_OFFLOAD */
/*
 * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
 * @soc_hdl: Datapath SOC handle
 *
 * This is a timer function used to age out stale AST nodes from
 * AST table
 */
#ifdef FEATURE_WDS
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;
	bool check_wds_ase = false;

	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
		soc->wds_ast_aging_timer_cnt = 0;
		check_wds_ase = true;
	}

	/* Peer list access lock */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);

	/* AST list access lock */
	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					/*
					 * Do not expire static AST entries
					 * and HM WDS entries
					 */
					if (ase->type !=
					    CDP_TXRX_AST_TYPE_WDS &&
					    ase->type !=
					    CDP_TXRX_AST_TYPE_MEC &&
					    ase->type !=
					    CDP_TXRX_AST_TYPE_DA)
						continue;

					/* Expire MEC entry every n sec.
					 * An entry needs to expire when an
					 * STA backbone becomes an AP
					 * backbone; in that case it needs
					 * to be re-added as a WDS entry.
					 */
					if (ase->is_active && ase->type ==
					    CDP_TXRX_AST_TYPE_MEC) {
						ase->is_active = FALSE;
						continue;
					} else if (ase->is_active &&
						   check_wds_ase) {
						ase->is_active = FALSE;
						continue;
					}

					if (ase->type ==
					    CDP_TXRX_AST_TYPE_MEC) {
						DP_STATS_INC(soc,
							     ast.aged_out, 1);
						dp_peer_del_ast(soc, ase);
					} else if (check_wds_ase) {
						DP_STATS_INC(soc,
							     ast.aged_out, 1);
						dp_peer_del_ast(soc, ase);
					}
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->ast_aging_timer,
			      DP_AST_AGING_TIMER_DEFAULT_MS);
}

/*
 * dp_soc_wds_attach() - Setup WDS timer and AST table
 * @soc: Datapath SOC handle
 *
 * Return: None
 */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
	soc->wds_ast_aging_timer_cnt = 0;
	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
		       dp_ast_aging_timer_fn, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
}

/*
 * dp_soc_wds_detach() - Detach WDS data structures and timers
 * @soc: Datapath SOC handle
 *
 * Return: None
 */
static void dp_soc_wds_detach(struct dp_soc *soc)
{
	qdf_timer_stop(&soc->ast_aging_timer);
	qdf_timer_free(&soc->ast_aging_timer);
}
#else
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif
/*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handle
 *
 * This API resets the default cpu ring map
 */
static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
{
	uint8_t i;
	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		switch (nss_config) {
		case dp_nss_cfg_first_radio:
			/*
			 * Setting Tx ring map for one nss offloaded radio
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_second_radio:
			/*
			 * Setting Tx ring map for the second nss offloaded
			 * radio
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_dbdc:
			/*
			 * Setting Tx ring map for 2 nss offloaded radios
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_dbtc:
			/*
			 * Setting Tx ring map for 3 nss offloaded radios
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
			break;

		default:
			dp_err("tx_ring_map failed due to invalid nss cfg");
			break;
		}
	}
}
/*
 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc: DP soc handle
 * @ring_type: ring type
 * @ring_num: ring number
 *
 * Return: nonzero if the ring is offloaded to NSS, 0 otherwise
 */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num)
{
	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
	uint8_t status = 0;

	switch (ring_type) {
	case WBM2SW_RELEASE:
	case REO_DST:
	case RXDMA_BUF:
		status = ((nss_config) & (1 << ring_num));
		break;
	default:
		break;
	}

	return status;
}
/*
 * dp_soc_reset_intr_mask() - reset interrupt mask
 * @soc: DP SOC handle
 *
 * Return: void
 */
static void dp_soc_reset_intr_mask(struct dp_soc *soc)
{
	uint8_t j;
	int *grp_mask = NULL;
	int group_number, mask, num_ring;

	/* number of tx rings */
	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);

	/*
	 * group mask for tx completion ring.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

	/* loop and reset the mask for only offloaded rings */
	for (j = 0; j < num_ring; j++) {
		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j))
			continue;

		/*
		 * Group number corresponding to tx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("ring not part of any group; ring_type: %d, ring_num %d"),
				  WBM2SW_RELEASE, j);
			return;
		}

		/* reset the tx mask for offloaded ring */
		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx,
						 group_number);
		mask &= (~(1 << j));

		/*
		 * reset the interrupt mask for offloaded ring.
		 */
		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number,
					  mask);
	}

	/* number of rx rings */
	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);

	/*
	 * group mask for reo destination ring.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];

	/* loop and reset the mask for only offloaded rings */
	for (j = 0; j < num_ring; j++) {
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j))
			continue;

		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("ring not part of any group; ring_type: %d, ring_num %d"),
				  REO_DST, j);
			return;
		}

		/* clear the rx mask bit for the offloaded ring */
		mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx,
						 group_number);
		mask &= (~(1 << j));

		/*
		 * clear the interrupt mask for the rx offloaded radio.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number,
					  mask);
	}

	/*
	 * group mask for Rx buffer refill ring
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];

	/* loop and reset the mask for only offloaded rings */
	for (j = 0; j < MAX_PDEV_CNT; j++) {
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j))
			continue;

		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("ring not part of any group; ring_type: %d, ring_num %d"),
				  RXDMA_BUF, j);
			return;
		}

		/* clear the refill mask bit for the offloaded ring */
		mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
							 group_number);
		mask &= (~(1 << j));

		/*
		 * clear the interrupt mask for the rx offloaded radio.
		 */
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
						  group_number, mask);
	}
}
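
/*
 * Example of the mask arithmetic above (illustrative only): if interrupt
 * group 1 currently services rx rings 0-2 (mask 0x7) and ring 1 is
 * offloaded to NSS, then
 *
 *	mask = 0x7 & ~(1 << 1) = 0x5
 *
 * leaving the host servicing rings 0 and 2 only; the offloaded ring no
 * longer raises host interrupts.
 */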
#ifdef IPA_OFFLOAD
/**
 * dp_reo_remap_config() - configure reo remap register value based
 *                         on nss configuration.
 * @soc: DP SOC handle
 * @remap1: output parameter indicating reo remap 1 register value
 * @remap2: output parameter indicating reo remap 2 register value
 *
 * Based on the offload_radio value, the remap configuration below
 * gets applied:
 *	0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
 *	1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
 *	2 - 2nd Radio handled by NSS (remap rings 1, 3 & 4)
 *	3 - both Radios handled by NSS (remap not required)
 *	4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
 *
 * Return: bool type, true if remap is configured else false.
 */
static bool dp_reo_remap_config(struct dp_soc *soc,
				uint32_t *remap1,
				uint32_t *remap2)
{
	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
		   (0x2 << 12) | (0x3 << 15) | (0x1 << 18) |
		   (0x2 << 21)) << 8;

	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
		   (0x1 << 12) | (0x2 << 15) | (0x3 << 18) |
		   (0x1 << 21)) << 8;

	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);

	return true;
}
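
/*
 * The remap words above pack eight 3-bit REO destination indices; the
 * whole pattern is shifted left by 8 to land in the register's remap
 * field. A hypothetical helper that would build such a word
 * (illustrative sketch only, not used by the driver):
 *
 *	static inline uint32_t dp_pack_reo_remap(const uint8_t dst[8])
 *	{
 *		uint32_t val = 0;
 *		int i;
 *
 *		for (i = 0; i < 8; i++)
 *			val |= (uint32_t)(dst[i] & 0x7) << (3 * i);
 *		return val << 8;
 *	}
 *
 * Feeding it {1, 2, 3, 1, 2, 3, 1, 2} reproduces the remap1 value above:
 * Rx is spread round-robin across SW rings 1-3, leaving ring 4 out of
 * the host hash distribution while IPA offload is enabled.
 */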
#else
static bool dp_reo_remap_config(struct dp_soc *soc,
				uint32_t *remap1,
				uint32_t *remap2)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	switch (offload_radio) {
	case dp_nss_cfg_default:
		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
			   (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
			   (0x3 << 18) | (0x4 << 21)) << 8;

		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
			   (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
			   (0x3 << 18) | (0x4 << 21)) << 8;
		break;
	case dp_nss_cfg_first_radio:
		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
			   (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
			   (0x2 << 18) | (0x3 << 21)) << 8;

		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
			   (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
			   (0x4 << 18) | (0x2 << 21)) << 8;
		break;
	case dp_nss_cfg_second_radio:
		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
			   (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
			   (0x1 << 18) | (0x3 << 21)) << 8;

		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
			   (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
			   (0x4 << 18) | (0x1 << 21)) << 8;
		break;
	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		/* return false if both or all are offloaded to NSS */
		return false;
	}

	dp_debug("remap1 %x remap2 %x offload_radio %u",
		 *remap1, *remap2, offload_radio);
	return true;
}
#endif
/*
 * dp_reo_frag_dst_set() - configure reo register to set the
 *                         fragment destination ring
 * @soc: Datapath soc
 * @frag_dst_ring: output parameter to set fragment destination ring
 *
 * Based on offload_radio, one of the fragment destination rings below
 * is selected:
 *	0 - TCL
 *	1 - SW1
 *	2 - SW2
 *	3 - SW3
 *	4 - SW4
 *	5 - Release
 *	6 - FW
 *	7 - alternate select
 *
 * Return: void
 */
static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	switch (offload_radio) {
	case dp_nss_cfg_default:
		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
		break;
	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_reo_frag_dst_set invalid offload radio config"));
		break;
	}
}
/*
 * dp_soc_cmn_setup() - Common SoC level initialization
 * @soc: Datapath SOC handle
 *
 * This is an internal function used to setup common SOC data structures,
 * to be called from PDEV attach after receiving HW mode capabilities from FW
 */
static int dp_soc_cmn_setup(struct dp_soc *soc)
{
	int i;
	struct hal_reo_params reo_params;
	int tx_ring_size;
	int tx_comp_ring_size;
	int reo_dst_ring_size;
	uint32_t entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	if (qdf_atomic_read(&soc->cmn_init_done))
		return 0;

	if (dp_hw_link_desc_pool_setup(soc))
		goto fail1;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	/* Setup SRNG rings */
	/* Common rings */
	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
			  wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for wbm_desc_rel_ring"));
		goto fail1;
	}

	soc->num_tcl_data_rings = 0;
	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
		soc->num_tcl_data_rings =
			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
		tx_comp_ring_size =
			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
		tx_ring_size =
			wlan_cfg_tx_ring_size(soc_cfg_ctx);
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
					  TCL_DATA, i, 0, tx_ring_size)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
				goto fail1;
			}
			/*
			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
			 * count
			 */
			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
					  WBM2SW_RELEASE, i, 0,
					  tx_comp_ring_size)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_tcl_data_rings = 0;
	}

	if (dp_tx_soc_attach(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_tx_soc_attach failed"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
	/* TCL command and status rings */
	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for tcl_cmd_ring"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for tcl_status_ring"));
		goto fail1;
	}

	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);

	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
	 * descriptors
	 */

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_INFO,
			  FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
					  i, 0, reo_dst_ring_size)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_reo_dest_rings = 0;
	}

	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
		/* Only valid for MCL */
		struct dp_pdev *pdev = soc->pdev_list[0];

		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
					  RXDMA_DST, 0, i, entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_err_dst_ring"));
				goto fail1;
			}
		}
	}
	/* TBD: call dp_rx_init to setup Rx SW descriptors */

	/* REO reinjection ring */
	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for reo_reinject_ring"));
		goto fail1;
	}

	/* Rx release ring */
	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
			  wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for rx_rel_ring"));
		goto fail1;
	}

	/* Rx exception ring */
	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->reo_exception_ring,
			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for reo_exception_ring"));
		goto fail1;
	}

	/* REO command and status rings */
	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
			  wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for reo_cmd_ring"));
		goto fail1;
	}

	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
	TAILQ_INIT(&soc->rx.reo_cmd_list);
	qdf_spinlock_create(&soc->rx.reo_cmd_lock);

	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
			  wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for reo_status_ring"));
		goto fail1;
	}

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* Setup HW REO */
	qdf_mem_zero(&reo_params, sizeof(reo_params));

	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
		/*
		 * Reo ring remap is not required if both radios
		 * are offloaded to NSS
		 */
		if (!dp_reo_remap_config(soc,
					 &reo_params.remap1,
					 &reo_params.remap2))
			goto out;

		reo_params.rx_hash_enabled = true;
	}

	/* setup the global rx defrag waitlist */
	TAILQ_INIT(&soc->rx.defrag.waitlist);
	soc->rx.defrag.timeout_ms =
		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
	soc->rx.flags.defrag_timeout_check =
		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);

out:
	/*
	 * set the fragment destination ring
	 */
	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);

	hal_reo_setup(soc->hal_soc, &reo_params);

	qdf_atomic_set(&soc->cmn_init_done, 1);
	qdf_nbuf_queue_init(&soc->htt_stats.msg);
	return 0;
fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
	return QDF_STATUS_E_FAILURE;
}
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);

static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}
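
	/*
	 * Note (hedged interpretation): with the flag/mask pair above, a
	 * segment is a typical aggregation candidate when
	 *
	 *	(tcp_flags & tcp_flag_mask) == tcp_flag
	 *
	 * i.e. ACK set and FIN/SYN/RST/URG/ECE/CWR all clear. The matching
	 * itself is performed by the target after this configuration is
	 * sent down via the lro_hash_config callback below.
	 */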
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   (void *)lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			    LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   (void *)lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			    LRO_IPV6_SEED_ARR_SZ));

	return status;
}
/*
 * dp_rxdma_ring_setup() - configure the RX DMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static int dp_rxdma_ring_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	int max_mac_rings;
	int i;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);

	for (i = 0; i < max_mac_rings; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: pdev_id %d mac_id %d",
			  __func__, pdev->pdev_id, i);
		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
				  RXDMA_BUF, 1, i,
				  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("failed rx mac ring setup"));
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}
#else
static int dp_rxdma_ring_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
static inline void
dp_dscp_tid_map_setup(struct dp_pdev *pdev)
{
	uint8_t map_id;
	struct dp_soc *soc = pdev->soc;

	if (!soc)
		return;

	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
		qdf_mem_copy(pdev->dscp_tid_map[map_id],
			     default_dscp_tid_map,
			     sizeof(default_dscp_tid_map));
	}

	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
		hal_tx_set_dscp_tid_map(soc->hal_soc,
					default_dscp_tid_map,
					map_id);
	}
}
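
/*
 * Illustrative lookup (hedged; field names as used elsewhere in this
 * file): once the maps are programmed, the TID for an outgoing packet
 * can be resolved from the 6-bit DSCP carried in the upper bits of the
 * IP TOS byte:
 *
 *	uint8_t dscp = (tos >> 2) & 0x3f;
 *	uint8_t tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][dscp];
 *
 * Every map id starts out as a copy of default_dscp_tid_map, and the
 * first num_hw_dscp_tid_map maps are mirrored into hardware above.
 */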
#ifdef IPA_OFFLOAD
/**
 * dp_setup_ipa_rx_refill_buf_ring() - Setup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_FAILURE: Error return
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int entries;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);

	/* Setup second Rx refill buffer ring */
	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			  IPA_RX_REFILL_BUF_RING_IDX,
			  pdev->pdev_id,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed second rx refill ring"));
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_cleanup_ipa_rx_refill_buf_ring() - Cleanup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: void
 */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			IPA_RX_REFILL_BUF_RING_IDX);
}
#else
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
}
#endif
#if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_rings_setup() - Initialize Monitor rings based on target
 * @soc: soc handle
 * @pdev: physical device handle
 *
 * Return: nonzero on failure and zero on success
 */
static
QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int mac_id = 0;
	int pdev_id = pdev->pdev_id;
	int entries;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		if (soc->wlan_cfg_ctx->rxdma1_enable) {
			entries =
			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_buf_ring[mac_id],
					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_buf_ring "));
				return QDF_STATUS_E_NOMEM;
			}

			entries =
			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_dst_ring[mac_id],
					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_dst_ring"));
				return QDF_STATUS_E_NOMEM;
			}

			entries =
			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_status_ring[mac_id],
					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_status_ring"));
				return QDF_STATUS_E_NOMEM;
			}

			entries =
			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_desc_ring[mac_id],
					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_desc_ring"));
				return QDF_STATUS_E_NOMEM;
			}
		} else {
			entries =
			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_status_ring[mac_id],
					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_status_ring"));
				return QDF_STATUS_E_NOMEM;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
}
#else
static
QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/*
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 *
 * Return: void
 */
#ifdef ATH_SUPPORT_EXT_STAT
void dp_iterate_update_peer_list(void *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = NULL;
	struct dp_peer *peer = NULL;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		qdf_spin_lock_bh(&soc->peer_ref_mutex);
		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
			dp_cal_client_update_peer_stats(&peer->stats);
		}
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
#else
void dp_iterate_update_peer_list(void *pdev_hdl)
{
}
#endif
/*
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @ctrl_pdev: Opaque PDEV object
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: PDEV ID
 *
 * Return: DP PDEV handle on success, NULL on failure
 */
static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
{
	int tx_ring_size;
	int tx_comp_ring_size;
	int reo_dst_ring_size;
	int entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = NULL;

	if (soc->dp_soc_reinit)
		pdev = soc->pdev_list[pdev_id];
	else
		pdev = qdf_mem_malloc(sizeof(*pdev));

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP PDEV memory allocation failed"));
		goto fail0;
	}

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution, i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;
	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));

	if (!pdev->invalid_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid peer memory allocation failed"));
		qdf_mem_free(pdev);
		goto fail0;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);

	if (!pdev->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("pdev cfg_attach failed"));
		qdf_mem_free(pdev->invalid_peer);
		qdf_mem_free(pdev);
		goto fail0;
	}

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));

	pdev->soc = soc;
	pdev->ctrl_pdev = ctrl_pdev;
	pdev->pdev_id = pdev_id;
	soc->pdev_list[pdev_id] = pdev;

	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
	soc->pdev_count++;

	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
	TAILQ_INIT(&pdev->neighbour_peers_list);
	pdev->neighbour_peers_added = false;

	if (dp_soc_cmn_setup(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_soc_cmn_setup failed"));
		goto fail1;
	}

	/* Setup per PDEV TCL rings if configured */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		tx_ring_size =
			wlan_cfg_tx_ring_size(soc_cfg_ctx);
		tx_comp_ring_size =
			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);

		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
				  pdev_id, pdev_id, tx_ring_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("dp_srng_setup failed for tcl_data_ring"));
			goto fail1;
		}
		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
				  WBM2SW_RELEASE, pdev_id, pdev_id,
				  tx_comp_ring_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("dp_srng_setup failed for tx_comp_ring"));
			goto fail1;
		}
		soc->num_tcl_data_rings++;
	}

	/* Tx specific init */
	if (dp_tx_pdev_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_tx_pdev_attach failed"));
		goto fail1;
	}

	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
	/* Setup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
				  pdev_id, pdev_id, reo_dst_ring_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("dp_srng_setup failed for reo_dest_ring"));
			goto fail1;
		}
		soc->num_reo_dest_rings++;
	}

	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
			  wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed rx refill ring"));
		goto fail1;
	}

	if (dp_rxdma_ring_setup(soc, pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("RXDMA ring config failed"));
		goto fail1;
	}

	if (dp_mon_rings_setup(soc, pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MONITOR rings setup failed"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
				  0, pdev_id, entries)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_err_dst_ring"));
			goto fail1;
		}
	}

	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail1;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail1;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_ipa_uc_attach failed"));
		goto fail1;
	}

	/* Rx specific init */
	if (dp_rx_pdev_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_rx_pdev_attach failed"));
		goto fail1;
	}

	DP_STATS_INIT(pdev);

	/* Monitor filter init */
	pdev->mon_filter_mode = MON_FILTER_ALL;
	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
	pdev->fp_data_filter = FILTER_DATA_ALL;
	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
	pdev->mo_data_filter = FILTER_DATA_ALL;

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);

	/* Rx monitor mode specific init */
	if (dp_rx_pdev_mon_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_rx_pdev_mon_attach failed");
		goto fail1;
	}

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_event_attach failed");
		goto fail1;
	}

	/* set the reo destination during initialization */
	pdev->reo_dest = pdev->pdev_id + 1;

	/*
	 * initialize ppdu tlv list
	 */
	TAILQ_INIT(&pdev->ppdu_info_list);
	pdev->tlv_count = 0;
	pdev->list_depth = 0;

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
					   sizeof(struct cdp_tx_sojourn_stats),
					   0, 4, TRUE);

	/* initialize cal client timer */
	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
			     &dp_iterate_update_peer_list);

	return (struct cdp_pdev *)pdev;

fail1:
	dp_pdev_detach((struct cdp_pdev *)pdev, 0);

fail0:
	return NULL;
}
/*
 * dp_rxdma_ring_cleanup() - cleanup the RX DMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
				  struct dp_pdev *pdev)
{
	int max_mac_rings =
		wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	int i;

	/* clamp to the number of rings that were actually set up */
	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
			max_mac_rings : MAX_RX_MAC_RINGS;

	for (i = 0; i < max_mac_rings; i++)
		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
				RXDMA_BUF, 1);

	qdf_timer_free(&soc->mon_reap_timer);
}
#else
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
				  struct dp_pdev *pdev)
{
}
#endif
/*
 * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
 * @pdev: device object
 *
 * Return: void
 */
static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
{
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;

	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
			   neighbour_peer_list_elem, temp_peer) {
		/* delete this peer from the list */
		TAILQ_REMOVE(&pdev->neighbour_peers_list,
			     peer, neighbour_peer_list_elem);
		qdf_mem_free(peer);
	}

	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
}
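
/*
 * Note: TAILQ_FOREACH_SAFE is required above because the loop body frees
 * the current node; the _SAFE variant caches the successor in temp_peer
 * before the body runs. The general shape of the idiom:
 *
 *	TAILQ_FOREACH_SAFE(elem, &head, list_field, next_elem) {
 *		TAILQ_REMOVE(&head, elem, list_field);
 *		qdf_mem_free(elem);
 *	}
 *
 * A plain TAILQ_FOREACH would read the next pointer out of memory that
 * was just freed.
 */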
/**
 * dp_htt_ppdu_stats_detach() - detach stats resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
	struct ppdu_info *ppdu_info, *ppdu_info_next;

	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}
}
#if !defined(DISABLE_MON_CONFIG)
static
void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
			 int mac_id)
{
	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_buf_ring[mac_id],
				RXDMA_MONITOR_BUF, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_dst_ring[mac_id],
				RXDMA_MONITOR_DST, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				RXDMA_MONITOR_STATUS, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_desc_ring[mac_id],
				RXDMA_MONITOR_DESC, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	} else {
		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				RXDMA_MONITOR_STATUS, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	}
}
#else
static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
				int mac_id)
{
}
#endif

/**
 * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
 * @soc: soc handle
 * @pdev: datapath physical dev handle
 * @mac_id: mac number
 *
 * Return: None
 */
static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
			       int mac_id)
{
}
/**
 * dp_pdev_mem_reset() - Reset txrx pdev memory
 * @pdev: dp pdev handle
 *
 * Return: None
 */
static void dp_pdev_mem_reset(struct dp_pdev *pdev)
{
	uint16_t len = 0;
	uint8_t *dp_pdev_offset = (uint8_t *)pdev;

	len = sizeof(struct dp_pdev) -
		offsetof(struct dp_pdev, pdev_deinit) -
		sizeof(pdev->pdev_deinit);
	dp_pdev_offset = dp_pdev_offset +
		offsetof(struct dp_pdev, pdev_deinit) +
		sizeof(pdev->pdev_deinit);

	qdf_mem_zero(dp_pdev_offset, len);
}
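
/*
 * The partial-reset idiom above, shown on a toy struct (illustrative
 * sketch, not driver code; offsetof comes from <stddef.h>): everything
 * located after the marker field is zeroed while the fields up to and
 * including it survive.
 *
 *	struct example { int keep; int marker; int z1; int z2; };
 *	struct example e;
 *	size_t off = offsetof(struct example, marker) + sizeof(e.marker);
 *
 *	memset((uint8_t *)&e + off, 0, sizeof(e) - off);
 *
 * For dp_pdev the marker is pdev_deinit, which keeps the handle
 * reusable across a re-attach without a fresh allocation.
 */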
/**
 * dp_pdev_deinit() - Deinit txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Return: None
 */
static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	int mac_id;

	/*
	 * Prevent double pdev deinitialization during radio detach
	 * execution, i.e. in the absence of any vdev.
	 */
	if (pdev->pdev_deinit)
		return;

	pdev->pdev_deinit = 1;

	dp_wdi_event_detach(pdev);

	dp_tx_pdev_detach(pdev);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
			       TCL_DATA, pdev->pdev_id);
		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
			       WBM2SW_RELEASE, pdev->pdev_id);
	}

	dp_pktlogmod_exit(pdev);

	dp_rx_pdev_detach(pdev);
	dp_rx_pdev_mon_detach(pdev);
	dp_neighbour_peers_detach(pdev);
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	dp_ipa_uc_detach(soc, pdev);

	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);

	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
			       REO_DST, pdev->pdev_id);
	}

	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);

	dp_rxdma_ring_cleanup(soc, pdev);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_mon_ring_deinit(soc, pdev, mac_id);
		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
			       RXDMA_DST, 0);
	}

	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}
	pdev->invalid_peer_head_msdu = NULL;
	pdev->invalid_peer_tail_msdu = NULL;

	dp_htt_ppdu_stats_detach(pdev);

	qdf_nbuf_free(pdev->sojourn_buf);

	dp_cal_client_detach(&pdev->cal_client_ctx);

	soc->pdev_count--;

	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	qdf_mem_free(pdev->invalid_peer);
	qdf_mem_free(pdev->dp_txrx_handle);
	dp_pdev_mem_reset(pdev);
}

/**
 * dp_pdev_deinit_wifi3() - Deinit txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Return: None
 */
static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
{
	dp_pdev_deinit(txrx_pdev, force);
}
/*
 * dp_pdev_detach() - Complete rest of pdev detach
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force detach
 *
 * Return: None
 */
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	int mac_id;

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
				TCL_DATA, pdev->pdev_id);
		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
				WBM2SW_RELEASE, pdev->pdev_id);
	}

	dp_mon_link_free(pdev);

	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
				REO_DST, pdev->pdev_id);
	}

	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_mon_ring_cleanup(soc, pdev, mac_id);
		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	}

	soc->pdev_list[pdev->pdev_id] = NULL;
	qdf_mem_free(pdev);
}

/*
 * dp_pdev_detach_wifi3() - detach txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force detach
 *
 * Return: None
 */
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;

	if (soc->dp_soc_reinit) {
		dp_pdev_detach(txrx_pdev, force);
	} else {
		dp_pdev_deinit(txrx_pdev, force);
		dp_pdev_detach(txrx_pdev, force);
	}
}
/*
 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
 * @soc: DP SOC handle
 */
static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
				     (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    rx_tid->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
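
/*
 * Ordering note (hedged): each hardware queue descriptor is DMA-unmapped
 * before its backing memory is freed; freeing first would leave the
 * device with a live mapping onto memory the allocator may reuse. The
 * freelist lock is dropped before qdf_list_destroy() runs, which is safe
 * here because the list has already been drained.
 */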
/**
 * dp_soc_mem_reset() - Reset Dp Soc memory
 * @soc: DP handle
 *
 * Return: None
 */
static void dp_soc_mem_reset(struct dp_soc *soc)
{
	uint16_t len = 0;
	uint8_t *dp_soc_offset = (uint8_t *)soc;

	len = sizeof(struct dp_soc) -
		offsetof(struct dp_soc, dp_soc_reinit) -
		sizeof(soc->dp_soc_reinit);
	dp_soc_offset = dp_soc_offset +
		offsetof(struct dp_soc, dp_soc_reinit) +
		sizeof(soc->dp_soc_reinit);

	qdf_mem_zero(dp_soc_offset, len);
}

/**
 * dp_soc_deinit() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Return: None
 */
static void dp_soc_deinit(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	qdf_atomic_set(&soc->cmn_init_done, 0);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_deinit((struct cdp_pdev *)
				       soc->pdev_list[i], 1);
	}

	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	/* Free pending htt stats messages */
	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	dp_reo_cmdlist_destroy(soc);

	dp_peer_find_detach(soc);

	/* Free the ring memories */
	/* Common rings */
	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
				       TCL_DATA, i);
			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
				       WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
				       REO_DST, i);
		}
	}

	/* REO reinjection ring */
	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);

	dp_soc_wds_detach(soc);

	qdf_spinlock_destroy(&soc->peer_ref_mutex);
	qdf_spinlock_destroy(&soc->htt_stats.lock);

	htt_soc_htc_dealloc(soc->htt_handle);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
	dp_reo_desc_freelist_destroy(soc);

	qdf_spinlock_destroy(&soc->ast_lock);

	dp_soc_mem_reset(soc);
}

/**
 * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Return: None
 */
static void dp_soc_deinit_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	soc->dp_soc_reinit = 1;
	dp_soc_deinit(txrx_soc);
}
/*
 * dp_soc_detach() - Detach rest of txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Return: None
 */
static void dp_soc_detach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	qdf_atomic_set(&soc->cmn_init_done, 0);

	/* TBD: Call Tx and Rx cleanup functions to free buffers and
	 * SW descriptors
	 */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_detach((struct cdp_pdev *)
				       soc->pdev_list[i], 1);
	}

	/* Free the ring memories */
	/* Common rings */
	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	dp_tx_soc_detach(soc);

	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
					TCL_DATA, i);
			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
					WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
					REO_DST, i);
		}
	}

	/* REO reinjection ring */
	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);

	dp_hw_link_desc_pool_cleanup(soc);

	htt_soc_detach(soc->htt_handle);
	soc->dp_soc_reinit = 0;

	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);

	qdf_mem_free(soc);
}

/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Return: None
 */
static void dp_soc_detach_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (soc->dp_soc_reinit) {
		dp_soc_detach(txrx_soc);
	} else {
		dp_soc_deinit(txrx_soc);
		dp_soc_detach(txrx_soc);
	}
}
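
/*
 * Teardown summary (descriptive note): dp_soc_deinit() releases runtime
 * state while leaving allocations in place so the SOC can be
 * re-initialized, whereas dp_soc_detach() frees the ring memories and
 * the soc itself. dp_soc_deinit_wifi3() sets dp_soc_reinit to record
 * that deinit already ran, so dp_soc_detach_wifi3() skips the second
 * deinit pass and goes straight to detach.
 */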
#if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
 * @soc: soc handle
 * @pdev: physical device handle
 * @mac_id: mac id
 * @mac_for_pdev: mac id mapped to this pdev
 *
 * Return: non-zero for failure, zero for success
 */
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_buf_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_BUF);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_dst_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DST);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_desc_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DESC);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
			return status;
		}
	} else {
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}
	}

	return status;
}
#else
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 * @soc: data path SoC handle
 *
 * This function is used to configure the MAC rings.
 * On MCL, the host provides buffers in the Host2FW ring;
 * the FW refills (copies) buffers to the ring and updates
 * ring_idx in the register.
 *
 * Return: zero on success, non-zero on failure
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);

			htt_srng_setup(soc->htt_handle, 0,
				       pdev->rx_refill_buf_ring.hal_srng,
				       RXDMA_BUF);

			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					       pdev->rx_refill_buf_ring2.hal_srng,
					       RXDMA_BUF);

			if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}

			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("DBS enabled max_mac_rings %d"),
					  max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("DBS disabled, max_mac_rings %d"),
					  max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  FL("pdev_id %d max_mac_rings %d"),
				  pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					       pdev->rx_mac_buf_ring[mac_id]
					       .hal_srng,
					       RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					       pdev->rxdma_err_dst_ring[mac_id]
					       .hal_srng,
					       RXDMA_DST);

				/* Configure monitor mode rings */
				status = dp_mon_htt_srng_setup(soc, pdev,
							       mac_id,
							       mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}
			}
		}
	}

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
		       dp_service_mon_rings, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
	return status;
}
#else
/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev == NULL)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       pdev->rx_refill_buf_ring.hal_srng,
				       RXDMA_BUF);
#ifndef DISABLE_MON_CONFIG
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
				       RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
				       RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				       RXDMA_MONITOR_STATUS);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
				       RXDMA_MONITOR_DESC);
#endif
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       pdev->rxdma_err_dst_ring[mac_id].hal_srng,
				       RXDMA_DST);
		}
	}
	return status;
}
#endif
/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Opaque Datapath SOC handle
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS
dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_soc_attach_target(soc->htt_handle);

	status = dp_rxdma_ring_config(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup messages to target");
		return status;
	}

	DP_STATS_INIT(soc);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return QDF_STATUS_SUCCESS;
}
/*
 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
 * @cdp_soc: Datapath SOC handle
 *
 * Return: nss config value
 */
static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;

	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
}

/*
 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
 * @cdp_soc: Datapath SOC handle
 * @config: nss config
 *
 * Return: void
 */
static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;

	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);

	/*
	 * TODO: masked out based on the per offloaded radio
	 */
	switch (config) {
	case dp_nss_cfg_default:
		break;
	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid offload config %d", config);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("nss-wifi<0> nss config is enabled"));
}
/*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @txrx_pdev: Datapath PDEV handle
 * @vdev_mac_addr: MAC address of the virtual interface
 * @vdev_id: VDEV Id
 * @op_mode: VDEV operating mode
 *
 * Return: DP VDEV handle on success, NULL on failure
 */
static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->osdev = soc->osdev;

	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
#ifdef notyet
	vdev->filters_num = 0;
#endif

	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
		     OL_TXRX_MAC_ADDR_LEN);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	TAILQ_INIT(&vdev->peer_list);

	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}

	if (wlan_op_mode_monitor == vdev->opmode) {
		pdev->monitor_vdev = vdev;
		return (struct cdp_vdev *)vdev;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	pdev->vdev_count++;

	dp_tx_vdev_attach(vdev);

	if (pdev->vdev_count == 1)
		dp_lro_hash_setup(soc, pdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
	DP_STATS_INIT(vdev);

	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
				     vdev->mac_addr.raw, NULL);

	return (struct cdp_vdev *)vdev;

fail0:
	return NULL;
}
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @ctrl_vdev: UMAC vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: void
 */
static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->osif_vdev = osif_vdev;
	vdev->ctrl_vdev = ctrl_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_get_key = txrx_ops->get_key;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;

	/* TODO: Enable the following once Tx code is integrated */
	if (vdev->mesh_vdev)
		txrx_ops->tx.tx = dp_tx_send_mesh;
	else
		txrx_ops->tx.tx = dp_tx_send;

	txrx_ops->tx.tx_exception = dp_tx_send_exception;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  "DP Vdev Register success");
}
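/*
 * A minimal usage sketch of the vdev lifecycle exposed by the functions
 * above and below (caller-side names here are illustrative only, not part
 * of this file):
 *
 *	struct ol_txrx_ops ops = { 0 };
 *	struct cdp_vdev *vdev;
 *
 *	ops.rx.rx = my_osif_rx_cb;		// hypothetical osif callback
 *	vdev = dp_vdev_attach_wifi3(pdev, mac, vdev_id, wlan_op_mode_sta);
 *	dp_vdev_register_wifi3(vdev, my_osif_ctx, ctrl_vdev, &ops);
 *	...
 *	dp_vdev_detach_wifi3(vdev, my_delete_cb, my_cb_ctx);
 *
 * Note that dp_vdev_register_wifi3() also hands the Tx entry points
 * (dp_tx_send or dp_tx_send_mesh) back to the osif layer via @txrx_ops.
 */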
/**
 * dp_vdev_flush_peers() - Forcibly flush peers of vdev
 * @vdev: Datapath VDEV handle
 *
 * Return: void
 */
static void dp_vdev_flush_peers(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *peer;
	uint16_t *peer_ids;
	uint8_t i = 0, j = 0;

	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
	if (!peer_ids) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to flush peers");
		return;
	}

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
			if (peer->peer_ids[i] != HTT_INVALID_PEER)
				if (j < soc->max_peers)
					peer_ids[j++] = peer->peer_ids[i];
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	for (i = 0; i < j; i++) {
		peer = dp_peer_find_by_id(soc, peer_ids[i]);
		if (peer) {
			dp_info("peer: %pM is getting flushed",
				peer->mac_addr.raw);
			dp_peer_delete_wifi3(peer, 0);
			/*
			 * we need to call dp_peer_unref_del_find_by_id()
			 * to remove the additional ref count incremented
			 * by the dp_peer_find_by_id() call.
			 *
			 * Hold the ref count while executing the
			 * dp_peer_delete_wifi3() call.
			 */
			dp_peer_unref_del_find_by_id(peer);
		}
		dp_rx_peer_unmap_handler(soc, peer_ids[i], vdev->vdev_id,
					 NULL, 0);
	}

	qdf_mem_free(peer_ids);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("Flushed peers for vdev object %pK"), vdev);
}
/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @vdev_handle: Datapath VDEV handle
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * Return: void
 */
static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
	ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;

	/* preconditions */
	qdf_assert(vdev);

	if (wlan_op_mode_monitor == vdev->opmode)
		goto free_vdev;

	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);

	/*
	 * If the target is hung, flush all peers before detaching the vdev;
	 * this will free all the references held due to missing
	 * unmap commands from the target.
	 */
	if ((hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET) ||
	    !hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		dp_vdev_flush_peers(vdev);

	/*
	 * Use peer_ref_mutex while accessing peer_list, in case
	 * a peer is in the process of being removed from the list.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/* check that the vdev has no peers allocated */
	if (!TAILQ_EMPTY(&vdev->peer_list)) {
		/* debug print - will be removed later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			  FL("not deleting vdev object %pK (%pM) "
			  "until deletion finishes for all its peers"),
			  vdev, vdev->mac_addr.raw);
		/* indicate that the vdev needs to be deleted */
		vdev->delete.pending = 1;
		vdev->delete.callback = callback;
		vdev->delete.context = cb_context;
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		return;
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	if (!soc->hw_nac_monitor_support) {
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			QDF_ASSERT(peer->vdev != vdev);
		}
	} else {
		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
				   neighbour_peer_list_elem, temp_peer) {
			if (peer->vdev == vdev) {
				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
					     neighbour_peer_list_elem);
				qdf_mem_free(peer);
			}
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	dp_tx_vdev_detach(vdev);
	/* remove the vdev from its parent pdev's list */
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)"),
		  vdev, vdev->mac_addr.raw);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

free_vdev:
	qdf_mem_free(vdev);

	if (callback)
		callback(cb_context);
}
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
#ifdef FEATURE_AST
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);

	peer->self_ast_entry = NULL;
	TAILQ_INIT(&peer->ast_entry_list);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
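/*
 * dp_peer_can_reuse() - check whether an existing peer entry can be reused
 * @vdev - datapath vdev handle
 * @peer_mac_addr - Peer MAC address
 *
 * Looks up the peer hash for @peer_mac_addr; if a reusable bss peer is
 * found it is returned with the reference taken by the hash lookup still
 * held, otherwise that reference is dropped and NULL is returned.
 *
 * Return: DP peer handle if reusable, NULL otherwise
 */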
#if ATH_SUPPORT_WRAP
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
				      0, vdev->vdev_id);
	if (!peer)
		return NULL;

	if (peer->bss_peer)
		return peer;

	dp_peer_unref_delete(peer);
	return NULL;
}
#else
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
				      0, vdev->vdev_id);
	if (!peer)
		return NULL;

	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
		return peer;

	dp_peer_unref_delete(peer);
	return NULL;
}
#endif
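/*
 * dp_peer_ast_handle_roam_del() - delete a stale WDS AST entry on roam
 * @soc - datapath soc handle
 * @peer_mac_addr - Peer MAC address
 *
 * When a STA roams between a repeater AP and the root AP, an AST entry may
 * exist for the MAC address without a corresponding peer; such an entry is
 * a leftover WDS (next-hop) entry and is removed here.
 *
 * Return: void
 */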
#if defined(FEATURE_AST)
#if !defined(AST_HKV1_WORKAROUND)
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	if (ast_entry && ast_entry->next_hop)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast_entry;

	if (soc->ast_override_support) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

		if (ast_entry && ast_entry->next_hop)
			dp_peer_del_ast(soc, ast_entry);

		qdf_spin_unlock_bh(&soc->ast_lock);
	}
}
#endif
#else
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       uint8_t *peer_mac_addr)
{
}
#endif
/*
 * dp_peer_create_wifi3() - attach txrx peer
 * @vdev_handle: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 * @ctrl_peer: Control path peer handle
 *
 * Return: DP peer handle on success, NULL on failure
 */
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
	uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
{
	struct dp_peer *peer;
	int i;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer_mac_addr);

	pdev = vdev->pdev;
	soc = pdev->soc;

	/*
	 * If a peer entry with the given MAC address already exists,
	 * reuse the peer and reset its state.
	 */
	peer = dp_peer_can_reuse(vdev, peer_mac_addr);

	if (peer) {
		qdf_atomic_init(&peer->is_default_route_set);
		dp_peer_cleanup(vdev, peer);

		peer->delete_in_progress = false;

		dp_peer_delete_ast_entries(soc, peer);

		if ((vdev->opmode == wlan_op_mode_sta) &&
		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
		    DP_MAC_ADDR_LEN)) {
			ast_type = CDP_TXRX_AST_TYPE_SELF;
		}

		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

		/*
		 * The control path maintains a node count which is
		 * incremented for every new peer create command. Since a
		 * new peer is not being created here and the earlier
		 * reference is reused, a peer_unref_delete event is sent to
		 * the control path to adjust the count back.
		 */
		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
				peer->mac_addr.raw, vdev->mac_addr.raw,
				vdev->opmode);
		}
		peer->ctrl_peer = ctrl_peer;

		dp_local_peer_id_alloc(pdev, peer);
		DP_STATS_INIT(peer);

		return (void *)peer;
	} else {
		/*
		 * When a STA roams from RPTR AP to ROOT AP and vice versa,
		 * we need to remove the AST entry which was earlier added
		 * as a WDS entry.
		 * If an AST entry exists, but no peer entry exists with the
		 * given MAC address, we can deduce it is a WDS entry.
		 */
		dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
	}

#ifdef notyet
	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
		soc->mempool_ol_ath_peer);
#else
	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
#endif

	if (!peer)
		return NULL; /* failure */

	qdf_mem_zero(peer, sizeof(struct dp_peer));

	TAILQ_INIT(&peer->ast_entry_list);

	/* store provided params */
	peer->vdev = vdev;
	peer->ctrl_peer = ctrl_peer;

	if ((vdev->opmode == wlan_op_mode_sta) &&
	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
	    DP_MAC_ADDR_LEN)) {
		ast_type = CDP_TXRX_AST_TYPE_SELF;
	}

	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

	qdf_spinlock_create(&peer->peer_info_lock);

	qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
		     OL_TXRX_MAC_ADDR_LEN);

	/* TODO: See if rx_opt_proc is really required */
	peer->rx_opt_proc = soc->rx_opt_proc;

	/* initialize the peer_id */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
		peer->peer_ids[i] = HTT_INVALID_PEER;

	qdf_spin_lock_bh(&soc->peer_ref_mutex);

	qdf_atomic_init(&peer->ref_cnt);

	/* keep one reference for attach */
	qdf_atomic_inc(&peer->ref_cnt);

	/* add this peer into the vdev's list */
	if (wlan_op_mode_sta == vdev->opmode)
		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
	else
		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* TODO: See if hash based search is required */
	dp_peer_find_hash_add(soc, peer);

	/* Initialize the peer state */
	peer->state = OL_TXRX_PEER_STATE_DISC;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "vdev %pK created peer %pK (%pM) ref_cnt: %d",
		  vdev, peer, peer->mac_addr.raw,
		  qdf_atomic_read(&peer->ref_cnt));
	/*
	 * For every peer map message, search and set if bss_peer
	 */
	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "vdev bss_peer!!!!");
		peer->bss_peer = 1;
		vdev->vap_bss_peer = peer;
	}

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	dp_local_peer_id_alloc(pdev, peer);
	DP_STATS_INIT(peer);
	return (void *)peer;
}
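/*
 * Peer reference lifecycle, as implemented above and in
 * dp_peer_unref_delete() further below (sketch only; the HTT map/unmap
 * handlers live elsewhere in the datapath):
 *
 *	dp_peer_create_wifi3()		ref_cnt = 1 (attach reference)
 *	HTT PEER_MAP handler		ref_cnt++ (firmware mapping)
 *	dp_peer_find_by_id()		ref_cnt++ (caller must drop it via
 *					dp_peer_unref_del_find_by_id())
 *	dp_peer_delete_wifi3()		drops the attach reference
 *	HTT PEER_UNMAP handler		drops the map reference; when
 *					ref_cnt reaches zero the peer memory
 *					is released in dp_peer_unref_delete()
 */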
/*
 * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
 * @vdev: Datapath VDEV handle
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 *
 * Return: None
 */
static
void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
				  enum cdp_host_reo_dest_ring *reo_dest,
				  bool *hash_based)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;

	pdev = vdev->pdev;
	soc = pdev->soc;
	/*
	 * hash based steering is disabled for Radios which are offloaded
	 * to NSS
	 */
	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);

	/*
	 * The line below ensures the proper reo_dest ring is chosen for
	 * cases where a toeplitz hash cannot be generated (ex: non TCP/UDP)
	 */
	*reo_dest = pdev->reo_dest;
}
#ifdef IPA_OFFLOAD
/*
 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
 * @vdev: Datapath VDEV handle
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 *
 * If IPA is enabled in ini, for SAP mode, disable hash based
 * steering, use default reo_dst ring for RX. Use config values for other modes.
 *
 * Return: None
 */
static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;

	pdev = vdev->pdev;
	soc = pdev->soc;

	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);

	/*
	 * If IPA is enabled, disable hash-based flow steering and set
	 * reo_dest_ring_4 as the REO ring to receive packets on.
	 * IPA is configured to reap reo_dest_ring_4.
	 *
	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
	 * enum values are from 1 - 4.
	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
	 */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		if (vdev->opmode == wlan_op_mode_ap) {
			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
			*hash_based = 0;
		}
	}
}
#else
/*
 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
 * @vdev: Datapath VDEV handle
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 *
 * Use system config values for hash based steering.
 *
 * Return: None
 */
static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based)
{
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}
#endif /* IPA_OFFLOAD */
/*
 * dp_peer_setup_wifi3() - initialize the peer
 * @vdev_hdl: virtual device object
 * @peer_hdl: Peer object
 *
 * Return: void
 */
static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer);

	pdev = vdev->pdev;
	soc = pdev->soc;

	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);

	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
		pdev->pdev_id, vdev->vdev_id,
		vdev->opmode, hash_based, reo_dest);

	/*
	 * There are corner cases where AD1 = AD2 = "the VAP's address",
	 * i.e. both devices have the same MAC address. In these cases we
	 * want such packets to be processed in the NULL-queue handler,
	 * which is the REO2TCL ring. For this reason we should not set up
	 * reo_queues and the default route for the bss_peer.
	 */
	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
		return;

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed
		 * to FW
		 */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
			pdev->ctrl_pdev, peer->mac_addr.raw,
			peer->vdev->vdev_id, hash_based, reo_dest);
	}

	qdf_atomic_set(&peer->is_default_route_set, 1);

	dp_peer_rx_init(pdev, peer);
}
/*
 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
 * @vdev_handle: virtual device object
 * @val: packet type (see enum htt_cmn_pkt_type)
 *
 * Return: void
 */
static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
				      enum htt_cmn_pkt_type val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->tx_encap_type = val;
}

/*
 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
 * @vdev_handle: virtual device object
 * @val: packet type (see enum htt_cmn_pkt_type)
 *
 * Return: void
 */
static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
				      enum htt_cmn_pkt_type val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->rx_decap_type = val;
}
/*
 * dp_set_ba_aging_timeout() - set ba aging timeout per AC
 * @txrx_soc: cdp soc handle
 * @ac: Access category
 * @value: timeout value in millisec
 *
 * Return: void
 */
static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
				    uint8_t ac, uint32_t value)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
}

/*
 * dp_get_ba_aging_timeout() - get ba aging timeout per AC
 * @txrx_soc: cdp soc handle
 * @ac: access category
 * @value: timeout value in millisec
 *
 * Return: void
 */
static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
				    uint8_t ac, uint32_t *value)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
}
/*
 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
 * @pdev_handle: physical device object
 * @val: reo destination ring index (1 - 4)
 *
 * Return: void
 */
static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
				 enum cdp_host_reo_dest_ring val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (pdev)
		pdev->reo_dest = val;
}

/*
 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
 * @pdev_handle: physical device object
 *
 * Return: reo destination ring index
 */
static enum cdp_host_reo_dest_ring
dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (pdev)
		return pdev->reo_dest;
	else
		return cdp_host_reo_dest_ring_unknown;
}
/*
 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
 * @pdev_handle: device object
 * @val: value to be set
 *
 * Return: 0 on success
 */
static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
					 uint32_t val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->filter_neighbour_peers = val;
	return 0;
}
/*
 * dp_update_filter_neighbour_peers() - set neighbour peer (NAC client)
 * address for smart mesh filtering
 * @vdev_handle: virtual device object
 * @cmd: Add/Del command
 * @macaddr: NAC client MAC address
 *
 * Return: 1 on success, 0 on failure
 */
static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_neighbour_peer *peer = NULL;

	if (!macaddr)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *)qdf_mem_malloc(
			sizeof(*peer));
		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			     macaddr, DP_MAC_ADDR_LEN);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
				  neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* first neighbour */
		if (!pdev->neighbour_peers_added) {
			pdev->neighbour_peers_added = true;
			dp_ppdu_ring_cfg(pdev);
		}
		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
					 macaddr, DP_MAC_ADDR_LEN)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					     peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted */
		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
			pdev->neighbour_peers_added = false;
			dp_ppdu_ring_cfg(pdev);
		}

		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
		    !pdev->enhanced_stats_en)
			dp_ppdu_ring_reset(pdev);
		return 1;
	}

fail0:
	return 0;
}
/*
 * dp_get_sec_type() - Get the security type
 * @peer: Datapath peer handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * Return: sec_type: Security type
 */
static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
{
	struct dp_peer *dpeer = (struct dp_peer *)peer;

	return dpeer->security[sec_idx].sec_type;
}
/*
 * dp_peer_authorize() - authorize txrx peer
 * @peer_handle: Datapath peer handle
 * @authorize: authorize flag
 *
 * Return: void
 */
static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_soc *soc;

	if (peer != NULL) {
		soc = peer->vdev->pdev->soc;

		qdf_spin_lock_bh(&soc->peer_ref_mutex);
		peer->authorize = authorize ? 1 : 0;
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}
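/**
 * dp_reset_and_release_peer_mem() - detach a peer from its vdev and free it
 * @soc: Datapath SOC handle
 * @pdev: Datapath PDEV handle
 * @peer: Datapath peer handle
 * @vdev_id: id of the vdev the peer was attached to
 *
 * Clears the vap_bss_peer back-reference if it pointed at this peer,
 * notifies the control path via the peer_unref_delete callback,
 * aggregates the peer's stats into its vdev while the bss peer is still
 * around, and finally frees the peer memory.
 *
 * Return: void
 */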
static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
					  struct dp_pdev *pdev,
					  struct dp_peer *peer,
					  uint32_t vdev_id)
{
	struct dp_vdev *vdev = NULL;
	struct dp_peer *bss_peer = NULL;
	uint8_t *m_addr = NULL;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->vdev_id == vdev_id)
			break;
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "vdev is NULL");
	} else {
		if (vdev->vap_bss_peer == peer)
			vdev->vap_bss_peer = NULL;
		m_addr = peer->mac_addr.raw;
		if (soc->cdp_soc.ol_ops->peer_unref_delete)
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
				m_addr, vdev->mac_addr.raw, vdev->opmode);

		if (vdev && vdev->vap_bss_peer) {
			bss_peer = vdev->vap_bss_peer;
			DP_UPDATE_STATS(vdev, peer);
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(peer);
}
/**
 * dp_delete_pending_vdev() - check and process vdev delete
 * @pdev: DP specific pdev pointer
 * @vdev: DP specific vdev pointer
 * @vdev_id: vdev id corresponding to vdev
 *
 * This API does the following:
 * 1) It releases tx flow pool buffers, as the vdev is
 *    going down and no peers are associated.
 * 2) It also detaches the vdev before cleaning up the vdev
 *    (struct dp_vdev) memory.
 */
static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
				   uint8_t vdev_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;

	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
		  vdev, vdev->mac_addr.raw);
	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)"),
		  vdev, vdev->mac_addr.raw);
	qdf_mem_free(vdev);
	vdev = NULL;

	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}
/*
 * dp_peer_unref_delete() - unref and delete peer
 * @peer_handle: Datapath peer handle
 *
 */
void dp_peer_unref_delete(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *tmppeer;
	int found = 0;
	uint16_t peer_id;
	uint16_t vdev_id;
	bool delete_vdev;

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_ids[0];
		vdev_id = vdev->vdev_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		if (peer_id != HTT_INVALID_PEER)
			soc->peer_id_to_obj_map[peer_id] = NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);

		/* remove the reference to the peer from the hash table */
		dp_peer_find_hash_remove(soc, peer);

		qdf_spin_lock_bh(&soc->ast_lock);
		if (peer->self_ast_entry) {
			dp_peer_del_ast(soc, peer->self_ast_entry);
			peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}

		if (found) {
			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
				     peer_list_elem);
		} else {
			/* Ignoring the remove operation as peer not found */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "peer:%pK not found in vdev:%pK peerlist:%pK",
				  peer, vdev, &peer->vdev->peer_list);
		}

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);

		/* check whether the parent vdev has no peers left */
		if (TAILQ_EMPTY(&vdev->peer_list)) {
			/*
			 * capture vdev delete pending flag's status
			 * while holding peer_ref_mutex lock
			 */
			delete_vdev = vdev->delete.pending;
			/*
			 * Now that there are no references to the peer, we can
			 * release the peer reference lock.
			 */
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);

			/*
			 * Check if the parent vdev was waiting for its peers
			 * to be deleted, in order for it to be deleted too.
			 */
			if (delete_vdev)
				dp_delete_pending_vdev(pdev, vdev, vdev_id);
		} else {
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		}
		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
	} else {
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}
/*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @peer_handle: Datapath peer handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Return: void
 */
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	/* redirect the peer's rx delivery function to point to a
	 * discard func
	 */
	peer->rx_opt_proc = dp_rx_discard;
	peer->ctrl_peer = NULL;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);

	dp_local_peer_id_free(peer->vdev->pdev, peer);
	qdf_spinlock_destroy(&peer->peer_info_lock);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer_handle);
}
/*
 * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
 * @pvdev: Datapath VDEV handle
 *
 * Return: pointer to the vdev's MAC address
 */
static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;

	return vdev->mac_addr.raw;
}
/*
 * dp_vdev_set_wds() - Enable/disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: value to be set
 *
 * Return: 0 on success
 */
static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->wds_enabled = val;
	return 0;
}
/*
 * dp_get_vdev_from_vdev_id_wifi3() - Get vdev handle from vdev id
 * @dev: Datapath PDEV handle
 * @vdev_id: vdev id
 *
 * Return: VDEV handle on success, NULL if no vdev with that id exists
 */
static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
						       uint8_t vdev_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(!pdev))
		return NULL;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->vdev_id == vdev_id)
			break;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	return (struct cdp_vdev *)vdev;
}
/*
 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
 * @dev: PDEV handle
 *
 * Return: VDEV handle of monitor mode
 */
static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;

	if (qdf_unlikely(!pdev))
		return NULL;

	return (struct cdp_vdev *)pdev->monitor_vdev;
}
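/*
 * dp_get_opmode() - Get the operating mode of the vdev
 * @vdev_handle: Datapath VDEV handle
 *
 * Return: operating mode (enum wlan_op_mode)
 */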
static int dp_get_opmode(struct cdp_vdev *vdev_handle)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	return vdev->opmode;
}
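/*
 * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles from the vdev
 * @pvdev: Datapath VDEV handle
 * @stack_fn_p: pointer to be populated with the rx-to-stack callback
 * @osif_vdev_p: pointer to be populated with the OSIF vdev handle
 *
 * Return: void
 */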
static
void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
					  ol_txrx_rx_fp *stack_fn_p,
					  ol_osif_vdev_handle *osif_vdev_p)
{
	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);

	qdf_assert(vdev);

	*stack_fn_p = vdev->osif_rx_stack;
	*osif_vdev_p = vdev->osif_vdev;
}
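/*
 * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of the vdev's pdev
 * @pvdev: Datapath VDEV handle
 *
 * Return: opaque cdp_cfg handle of the parent pdev's wlan config context
 */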
static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
	struct dp_pdev *pdev = vdev->pdev;

	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
}
/**
 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
 *                                 ring based on target
 * @soc: soc handle
 * @mac_for_pdev: pdev_id
 * @pdev: physical device handle
 * @ring_num: mac id
 * @htt_tlv_filter: tlv filter
 *
 * Return: zero on success, non-zero on failure
 */
static inline
QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
				       struct dp_pdev *pdev, uint8_t ring_num,
				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
{
	QDF_STATUS status;

	if (soc->wlan_cfg_ctx->rxdma1_enable)
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     pdev->rxdma_mon_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_MONITOR_BUF,
					     RX_BUFFER_SIZE,
					     &htt_tlv_filter);
	else
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     pdev->rx_mac_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_BUF, RX_BUFFER_SIZE,
					     &htt_tlv_filter);

	return status;
}
/**
 * dp_reset_monitor_mode() - Disable monitor mode
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc = pdev->soc;
	uint8_t pdev_id;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->mon_lock);

	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			/* release mon_lock before bailing out */
			qdf_spin_unlock_bh(&pdev->mon_lock);
			return status;
		}

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
				    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
				    &htt_tlv_filter);
	}

	pdev->monitor_vdev = NULL;
	pdev->mcopy_mode = 0;

	qdf_spin_unlock_bh(&pdev->mon_lock);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_set_nac() - set peer_nac
 * @peer_handle: Datapath PEER handle
 *
 * Return: void
 */
static void dp_set_nac(struct cdp_peer *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	peer->nac = 1;
}

/**
 * dp_get_tx_pending() - read pending tx
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: outstanding tx
 */
static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	return qdf_atomic_read(&pdev->num_tx_outstanding);
}
/**
 * dp_get_peer_mac_from_peer_id() - get peer mac
 * @pdev_handle: Datapath PDEV handle
 * @peer_id: Peer ID
 * @peer_mac: MAC addr of PEER
 *
 * Return: void
 */
static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
					 uint32_t peer_id, uint8_t *peer_mac)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_peer *peer;

	if (pdev && peer_mac) {
		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
		if (peer) {
			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
				     DP_MAC_ADDR_LEN);
			dp_peer_unref_del_find_by_id(peer);
		}
	}
}
/**
 * dp_pdev_configure_monitor_rings() - configure monitor rings
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */
static QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
{
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
		  pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		  pdev->fp_ctrl_filter, pdev->fp_data_filter,
		  pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		  pdev->mo_data_filter);

	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
	if (pdev->mcopy_mode)
		htt_tlv_filter.fp_data_filter = 0;
	else
		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}
	}

	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;
	if (pdev->mcopy_mode)
		htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
							  pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
				    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
				    &htt_tlv_filter);
	}

	return status;
}
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @vdev_handle: Datapath VDEV handle
 * @smart_monitor: Flag to denote if it is smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
					   uint8_t smart_monitor)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;

	qdf_assert(vdev);

	pdev = vdev->pdev;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
		  pdev, pdev->pdev_id, pdev->soc, vdev);

	/* Check if the current pdev's monitor_vdev exists */
	if (pdev->monitor_vdev || pdev->mcopy_mode) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "monitor vap already created vdev=%pK\n", vdev);
		qdf_assert(vdev);
		return QDF_STATUS_E_RESOURCES;
	}

	pdev->monitor_vdev = vdev;

	/* If smart monitor mode, do not configure monitor ring */
	if (smart_monitor)
		return QDF_STATUS_SUCCESS;

	return dp_pdev_configure_monitor_rings(pdev);
}
/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @pdev_handle: Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
				   struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time
	 */
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = pdev->monitor_vdev;
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		  pdev, pdev_id, soc, vdev);

	/* Check if the current pdev's monitor_vdev exists */
	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	pdev->mon_filter_mode = filter_val->mode;
	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	pdev->fp_data_filter = filter_val->fp_data;
	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	pdev->mo_data_filter = filter_val->mo_data;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
		  pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		  pdev->fp_ctrl_filter, pdev->fp_data_filter,
		  pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		  pdev->mo_data_filter);

	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
				    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
				    &htt_tlv_filter);
	}

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
	if (pdev->mcopy_mode)
		htt_tlv_filter.fp_data_filter = 0;
	else
		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}
	}

	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;
	if (pdev->mcopy_mode)
		htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
							  pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
				    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
				    &htt_tlv_filter);
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_get_pdev_id_frm_pdev() - get pdev_id
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: pdev_id
 */
static
uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	return pdev->pdev_id;
}

/**
 * dp_pdev_set_chan_noise_floor() - set channel noise floor
 * @pdev_handle: Datapath PDEV handle
 * @chan_noise_floor: Channel Noise Floor
 *
 * Return: void
 */
static
void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
				  int16_t chan_noise_floor)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	pdev->chan_noise_floor = chan_noise_floor;
}
  5022. /**
  5023. * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
  5024. * @vdev_handle: Datapath VDEV handle
  5025. * Return: true on ucast filter flag set
  5026. */
  5027. static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
  5028. {
  5029. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5030. struct dp_pdev *pdev;
  5031. pdev = vdev->pdev;
  5032. if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
  5033. (pdev->mo_data_filter & FILTER_DATA_UCAST))
  5034. return true;
  5035. return false;
  5036. }
  5037. /**
  5038. * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
  5039. * @vdev_handle: Datapath VDEV handle
  5040. * Return: true on mcast filter flag set
  5041. */
  5042. static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
  5043. {
  5044. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5045. struct dp_pdev *pdev;
  5046. pdev = vdev->pdev;
  5047. if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
  5048. (pdev->mo_data_filter & FILTER_DATA_MCAST))
  5049. return true;
  5050. return false;
  5051. }
  5052. /**
  5053. * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
  5054. * @vdev_handle: Datapath VDEV handle
  5055. * Return: true on non data filter flag set
  5056. */
  5057. static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
  5058. {
  5059. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5060. struct dp_pdev *pdev;
  5061. pdev = vdev->pdev;
  5062. if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
  5063. (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
  5064. if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
  5065. (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
  5066. return true;
  5067. }
  5068. }
  5069. return false;
  5070. }
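/*
 * Editor's note: all three getters above reduce to the same bitmask
 * test against the pdev's filter-pass (FP) and monitor-other (MO)
 * filter words. A standalone sketch of that test follows; the
 * EX_FILTER_* bit values are illustrative placeholders, not the real
 * HTT filter encodings.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_FILTER_DATA_UCAST 0x1	/* hypothetical bit */
#define EX_FILTER_DATA_MCAST 0x2	/* hypothetical bit */

struct ex_filter_cfg {
	uint32_t fp_data_filter;	/* filter-pass data filter */
	uint32_t mo_data_filter;	/* monitor-other data filter */
};

/* true when either ring class subscribes to unicast data */
static bool ex_filter_ucast(const struct ex_filter_cfg *cfg)
{
	return (cfg->fp_data_filter & EX_FILTER_DATA_UCAST) ||
	       (cfg->mo_data_filter & EX_FILTER_DATA_UCAST);
}

int main(void)
{
	struct ex_filter_cfg cfg = { .fp_data_filter = EX_FILTER_DATA_UCAST };

	printf("ucast filter set: %d\n", ex_filter_ucast(&cfg));
	return 0;
}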
  5071. #ifdef MESH_MODE_SUPPORT
  5072. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  5073. {
  5074. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5075. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5076. FL("val %d"), val);
  5077. vdev->mesh_vdev = val;
  5078. }
  5079. /*
  5080. * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
  5081. * @vdev_hdl: virtual device object
  5082. * @val: value to be set
  5083. *
  5084. * Return: void
  5085. */
  5086. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  5087. {
  5088. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  5089. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5090. FL("val %d"), val);
  5091. vdev->mesh_rx_filter = val;
  5092. }
  5093. #endif
  5094. /*
5095. * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
5096. * Current scope is bar received count
5097. *
5098. * @pdev: DP_PDEV handle
  5099. *
  5100. * Return: void
  5101. */
  5102. #define STATS_PROC_TIMEOUT (HZ/1000)
  5103. static void
  5104. dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
  5105. {
  5106. struct dp_vdev *vdev;
  5107. struct dp_peer *peer;
  5108. uint32_t waitcnt;
  5109. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5110. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5111. if (!peer) {
  5112. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5113. FL("DP Invalid Peer refernce"));
  5114. return;
  5115. }
  5116. if (peer->delete_in_progress) {
  5117. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5118. FL("DP Peer deletion in progress"));
  5119. continue;
  5120. }
  5121. qdf_atomic_inc(&peer->ref_cnt);
  5122. waitcnt = 0;
  5123. dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
  5124. while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
  5125. && waitcnt < 10) {
  5126. schedule_timeout_interruptible(
  5127. STATS_PROC_TIMEOUT);
  5128. waitcnt++;
  5129. }
  5130. qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
  5131. dp_peer_unref_delete(peer);
  5132. }
  5133. }
  5134. }
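/*
 * Editor's note: the loop above is a bounded poll -- sleep one short
 * tick between checks of stats_cmd_complete and give up after ten
 * tries so a lost REO status callback cannot hang the caller. A
 * self-contained sketch of the pattern, using C11 atomics and
 * usleep() in place of qdf_atomic_* and
 * schedule_timeout_interruptible():
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define EX_MAX_POLLS 10

static atomic_int ex_cmd_complete = 1;	/* pretend the callback fired */

static bool ex_wait_for_stats(void)
{
	int waitcnt = 0;

	while (!atomic_load(&ex_cmd_complete) && waitcnt < EX_MAX_POLLS) {
		usleep(1000);		/* ~1 ms, mirrors STATS_PROC_TIMEOUT */
		waitcnt++;
	}
	/* consume the flag for the next command, as the driver does */
	return atomic_exchange(&ex_cmd_complete, 0) != 0;
}

int main(void)
{
	printf("stats completed: %d\n", ex_wait_for_stats());
	return 0;
}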
  5135. /**
  5136. * dp_rx_bar_stats_cb(): BAR received stats callback
  5137. * @soc: SOC handle
  5138. * @cb_ctxt: Call back context
  5139. * @reo_status: Reo status
  5140. *
  5141. * return: void
  5142. */
  5143. void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  5144. union hal_reo_status *reo_status)
  5145. {
  5146. struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
  5147. struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
  5148. if (!qdf_atomic_read(&soc->cmn_init_done))
  5149. return;
  5150. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5151. DP_TRACE_STATS(FATAL, "REO stats failure %d\n",
  5152. queue_status->header.status);
  5153. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  5154. return;
  5155. }
  5156. pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
  5157. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  5158. }
  5159. /**
  5160. * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5161. * @vdev: DP VDEV handle
* @vdev_stats: buffer to hold the consolidated vdev stats
5162. *
  5163. * return: void
  5164. */
  5165. void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
  5166. struct cdp_vdev_stats *vdev_stats)
  5167. {
  5168. struct dp_peer *peer = NULL;
  5169. struct dp_soc *soc = NULL;
  5170. if (!vdev)
  5171. return;
  5172. soc = vdev->pdev->soc;
  5173. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  5174. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  5175. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
  5176. dp_update_vdev_stats(vdev_stats, peer);
  5177. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  5178. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5179. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5180. vdev_stats, vdev->vdev_id,
  5181. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  5182. #endif
  5183. }
  5184. /**
  5185. * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
  5186. * @pdev: DP PDEV handle
  5187. *
  5188. * return: void
  5189. */
  5190. static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
  5191. {
  5192. struct dp_vdev *vdev = NULL;
  5193. struct cdp_vdev_stats *vdev_stats =
  5194. qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  5195. if (!vdev_stats) {
  5196. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5197. "DP alloc failure - unable to get alloc vdev stats");
  5198. return;
  5199. }
  5200. qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
  5201. qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
  5202. qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
  5203. if (pdev->mcopy_mode)
  5204. DP_UPDATE_STATS(pdev, pdev->invalid_peer);
  5205. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  5206. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  5207. dp_aggregate_vdev_stats(vdev, vdev_stats);
  5208. dp_update_pdev_stats(pdev, vdev_stats);
  5209. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
  5210. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
  5211. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
  5212. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
  5213. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
  5214. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
  5215. DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
  5216. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
  5217. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
  5218. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
  5219. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
  5220. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
  5221. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
  5222. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
  5223. DP_STATS_AGGR(pdev, vdev,
  5224. tx_i.mcast_en.dropped_map_error);
  5225. DP_STATS_AGGR(pdev, vdev,
  5226. tx_i.mcast_en.dropped_self_mac);
  5227. DP_STATS_AGGR(pdev, vdev,
  5228. tx_i.mcast_en.dropped_send_fail);
  5229. DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
  5230. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
  5231. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
  5232. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
  5233. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
  5234. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
  5235. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.headroom_insufficient);
  5236. DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
  5237. DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
  5238. DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
  5239. DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
  5240. pdev->stats.tx_i.dropped.dropped_pkt.num =
  5241. pdev->stats.tx_i.dropped.dma_error +
  5242. pdev->stats.tx_i.dropped.ring_full +
  5243. pdev->stats.tx_i.dropped.enqueue_fail +
  5244. pdev->stats.tx_i.dropped.desc_na.num +
  5245. pdev->stats.tx_i.dropped.res_full;
  5246. pdev->stats.tx.last_ack_rssi =
  5247. vdev->stats.tx.last_ack_rssi;
  5248. pdev->stats.tx_i.tso.num_seg =
  5249. vdev->stats.tx_i.tso.num_seg;
  5250. }
  5251. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  5252. qdf_mem_free(vdev_stats);
  5253. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5254. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
  5255. pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
  5256. #endif
  5257. }
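/*
 * Editor's note: aggregation is strictly bottom-up -- peer counters
 * roll into a scratch vdev snapshot, which then rolls into the pdev
 * totals, which is why the pdev counters can be zeroed and rebuilt on
 * every call. A minimal standalone sketch of that two-level roll-up:
 */
#include <stdio.h>

struct ex_pkt_stats { unsigned num; unsigned long long bytes; };

static void ex_accumulate(struct ex_pkt_stats *dst,
			  const struct ex_pkt_stats *src)
{
	dst->num += src->num;
	dst->bytes += src->bytes;
}

int main(void)
{
	struct ex_pkt_stats peers[2] = { { 3, 4500 }, { 7, 9100 } };
	struct ex_pkt_stats vdev = { 0, 0 }, pdev = { 0, 0 };
	int i;

	for (i = 0; i < 2; i++)		/* peer -> vdev */
		ex_accumulate(&vdev, &peers[i]);
	ex_accumulate(&pdev, &vdev);	/* vdev -> pdev */
	printf("pdev: %u pkts, %llu bytes\n", pdev.num, pdev.bytes);
	return 0;
}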
  5258. /**
  5259. * dp_vdev_getstats() - get vdev packet level stats
  5260. * @vdev_handle: Datapath VDEV handle
  5261. * @stats: cdp network device stats structure
  5262. *
  5263. * Return: void
  5264. */
  5265. static void dp_vdev_getstats(void *vdev_handle,
  5266. struct cdp_dev_stats *stats)
  5267. {
  5268. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  5269. struct cdp_vdev_stats *vdev_stats =
  5270. qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
  5271. if (!vdev_stats) {
  5272. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5273. "DP alloc failure - unable to get alloc vdev stats");
  5274. return;
  5275. }
  5276. dp_aggregate_vdev_stats(vdev, vdev_stats);
  5277. stats->tx_packets = vdev_stats->tx_i.rcvd.num;
  5278. stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
  5279. stats->tx_errors = vdev_stats->tx.tx_failed +
  5280. vdev_stats->tx_i.dropped.dropped_pkt.num;
  5281. stats->tx_dropped = stats->tx_errors;
  5282. stats->rx_packets = vdev_stats->rx.unicast.num +
  5283. vdev_stats->rx.multicast.num +
  5284. vdev_stats->rx.bcast.num;
  5285. stats->rx_bytes = vdev_stats->rx.unicast.bytes +
  5286. vdev_stats->rx.multicast.bytes +
  5287. vdev_stats->rx.bcast.bytes;
  5288. }
  5289. /**
  5290. * dp_pdev_getstats() - get pdev packet level stats
  5291. * @pdev_handle: Datapath PDEV handle
  5292. * @stats: cdp network device stats structure
  5293. *
  5294. * Return: void
  5295. */
  5296. static void dp_pdev_getstats(void *pdev_handle,
  5297. struct cdp_dev_stats *stats)
  5298. {
  5299. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  5300. dp_aggregate_pdev_stats(pdev);
  5301. stats->tx_packets = pdev->stats.tx_i.rcvd.num;
  5302. stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
  5303. stats->tx_errors = pdev->stats.tx.tx_failed +
  5304. pdev->stats.tx_i.dropped.dropped_pkt.num;
  5305. stats->tx_dropped = stats->tx_errors;
  5306. stats->rx_packets = pdev->stats.rx.unicast.num +
  5307. pdev->stats.rx.multicast.num +
  5308. pdev->stats.rx.bcast.num;
  5309. stats->rx_bytes = pdev->stats.rx.unicast.bytes +
  5310. pdev->stats.rx.multicast.bytes +
  5311. pdev->stats.rx.bcast.bytes;
  5312. }
  5313. /**
  5314. * dp_get_device_stats() - get interface level packet stats
  5315. * @handle: device handle
  5316. * @stats: cdp network device stats structure
  5317. * @type: device type pdev/vdev
  5318. *
  5319. * Return: void
  5320. */
  5321. static void dp_get_device_stats(void *handle,
  5322. struct cdp_dev_stats *stats, uint8_t type)
  5323. {
  5324. switch (type) {
  5325. case UPDATE_VDEV_STATS:
  5326. dp_vdev_getstats(handle, stats);
  5327. break;
  5328. case UPDATE_PDEV_STATS:
  5329. dp_pdev_getstats(handle, stats);
  5330. break;
  5331. default:
  5332. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  5333. "apstats cannot be updated for this input "
  5334. "type %d", type);
  5335. break;
  5336. }
  5337. }
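/*
 * Editor's note: dp_get_device_stats() is a thin dispatcher keyed on
 * the caller-supplied device type. A standalone sketch of the same
 * switch shape, with illustrative type values standing in for
 * UPDATE_VDEV_STATS / UPDATE_PDEV_STATS:
 */
#include <stdio.h>

enum ex_dev_type { EX_VDEV_STATS, EX_PDEV_STATS };

static void ex_get_device_stats(enum ex_dev_type type)
{
	switch (type) {
	case EX_VDEV_STATS:
		printf("aggregate vdev stats\n");
		break;
	case EX_PDEV_STATS:
		printf("aggregate pdev stats\n");
		break;
	default:
		printf("stats cannot be updated for type %d\n", type);
		break;
	}
}

int main(void)
{
	ex_get_device_stats(EX_PDEV_STATS);
	return 0;
}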
  5338. /**
  5339. * dp_print_pdev_tx_stats(): Print Pdev level TX stats
  5340. * @pdev: DP_PDEV Handle
  5341. *
  5342. * Return:void
  5343. */
  5344. static inline void
  5345. dp_print_pdev_tx_stats(struct dp_pdev *pdev)
  5346. {
  5347. uint8_t index = 0;
  5348. DP_PRINT_STATS("PDEV Tx Stats:\n");
  5349. DP_PRINT_STATS("Received From Stack:");
  5350. DP_PRINT_STATS(" Packets = %d",
  5351. pdev->stats.tx_i.rcvd.num);
  5352. DP_PRINT_STATS(" Bytes = %llu",
  5353. pdev->stats.tx_i.rcvd.bytes);
  5354. DP_PRINT_STATS("Processed:");
  5355. DP_PRINT_STATS(" Packets = %d",
  5356. pdev->stats.tx_i.processed.num);
  5357. DP_PRINT_STATS(" Bytes = %llu",
  5358. pdev->stats.tx_i.processed.bytes);
  5359. DP_PRINT_STATS("Total Completions:");
  5360. DP_PRINT_STATS(" Packets = %u",
  5361. pdev->stats.tx.comp_pkt.num);
  5362. DP_PRINT_STATS(" Bytes = %llu",
  5363. pdev->stats.tx.comp_pkt.bytes);
  5364. DP_PRINT_STATS("Successful Completions:");
  5365. DP_PRINT_STATS(" Packets = %u",
  5366. pdev->stats.tx.tx_success.num);
  5367. DP_PRINT_STATS(" Bytes = %llu",
  5368. pdev->stats.tx.tx_success.bytes);
  5369. DP_PRINT_STATS("Dropped:");
  5370. DP_PRINT_STATS(" Total = %d",
  5371. pdev->stats.tx_i.dropped.dropped_pkt.num);
  5372. DP_PRINT_STATS(" Dma_map_error = %d",
  5373. pdev->stats.tx_i.dropped.dma_error);
  5374. DP_PRINT_STATS(" Ring Full = %d",
  5375. pdev->stats.tx_i.dropped.ring_full);
  5376. DP_PRINT_STATS(" Descriptor Not available = %d",
  5377. pdev->stats.tx_i.dropped.desc_na.num);
  5378. DP_PRINT_STATS(" HW enqueue failed= %d",
  5379. pdev->stats.tx_i.dropped.enqueue_fail);
  5380. DP_PRINT_STATS(" Resources Full = %d",
  5381. pdev->stats.tx_i.dropped.res_full);
  5382. DP_PRINT_STATS(" FW removed Pkts = %u",
  5383. pdev->stats.tx.dropped.fw_rem.num);
  5384. DP_PRINT_STATS(" FW removed bytes= %llu",
  5385. pdev->stats.tx.dropped.fw_rem.bytes);
  5386. DP_PRINT_STATS(" FW removed transmitted = %d",
  5387. pdev->stats.tx.dropped.fw_rem_tx);
  5388. DP_PRINT_STATS(" FW removed untransmitted = %d",
  5389. pdev->stats.tx.dropped.fw_rem_notx);
  5390. DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d",
  5391. pdev->stats.tx.dropped.fw_reason1);
  5392. DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d",
  5393. pdev->stats.tx.dropped.fw_reason2);
  5394. DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d",
  5395. pdev->stats.tx.dropped.fw_reason3);
  5396. DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
  5397. pdev->stats.tx.dropped.age_out);
  5398. DP_PRINT_STATS(" headroom insufficient = %d",
  5399. pdev->stats.tx_i.dropped.headroom_insufficient);
  5400. DP_PRINT_STATS(" Multicast:");
  5401. DP_PRINT_STATS(" Packets: %u",
  5402. pdev->stats.tx.mcast.num);
  5403. DP_PRINT_STATS(" Bytes: %llu",
  5404. pdev->stats.tx.mcast.bytes);
  5405. DP_PRINT_STATS("Scatter Gather:");
  5406. DP_PRINT_STATS(" Packets = %d",
  5407. pdev->stats.tx_i.sg.sg_pkt.num);
  5408. DP_PRINT_STATS(" Bytes = %llu",
  5409. pdev->stats.tx_i.sg.sg_pkt.bytes);
  5410. DP_PRINT_STATS(" Dropped By Host = %d",
  5411. pdev->stats.tx_i.sg.dropped_host.num);
  5412. DP_PRINT_STATS(" Dropped By Target = %d",
  5413. pdev->stats.tx_i.sg.dropped_target);
  5414. DP_PRINT_STATS("TSO:");
  5415. DP_PRINT_STATS(" Number of Segments = %d",
  5416. pdev->stats.tx_i.tso.num_seg);
  5417. DP_PRINT_STATS(" Packets = %d",
  5418. pdev->stats.tx_i.tso.tso_pkt.num);
  5419. DP_PRINT_STATS(" Bytes = %llu",
  5420. pdev->stats.tx_i.tso.tso_pkt.bytes);
  5421. DP_PRINT_STATS(" Dropped By Host = %d",
  5422. pdev->stats.tx_i.tso.dropped_host.num);
  5423. DP_PRINT_STATS("Mcast Enhancement:");
  5424. DP_PRINT_STATS(" Packets = %d",
  5425. pdev->stats.tx_i.mcast_en.mcast_pkt.num);
  5426. DP_PRINT_STATS(" Bytes = %llu",
  5427. pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
  5428. DP_PRINT_STATS(" Dropped: Map Errors = %d",
  5429. pdev->stats.tx_i.mcast_en.dropped_map_error);
  5430. DP_PRINT_STATS(" Dropped: Self Mac = %d",
  5431. pdev->stats.tx_i.mcast_en.dropped_self_mac);
  5432. DP_PRINT_STATS(" Dropped: Send Fail = %d",
  5433. pdev->stats.tx_i.mcast_en.dropped_send_fail);
  5434. DP_PRINT_STATS(" Unicast sent = %d",
  5435. pdev->stats.tx_i.mcast_en.ucast);
  5436. DP_PRINT_STATS("Raw:");
  5437. DP_PRINT_STATS(" Packets = %d",
  5438. pdev->stats.tx_i.raw.raw_pkt.num);
  5439. DP_PRINT_STATS(" Bytes = %llu",
  5440. pdev->stats.tx_i.raw.raw_pkt.bytes);
  5441. DP_PRINT_STATS(" DMA map error = %d",
  5442. pdev->stats.tx_i.raw.dma_map_error);
  5443. DP_PRINT_STATS("Reinjected:");
  5444. DP_PRINT_STATS(" Packets = %d",
  5445. pdev->stats.tx_i.reinject_pkts.num);
  5446. DP_PRINT_STATS(" Bytes = %llu\n",
  5447. pdev->stats.tx_i.reinject_pkts.bytes);
  5448. DP_PRINT_STATS("Inspected:");
  5449. DP_PRINT_STATS(" Packets = %d",
  5450. pdev->stats.tx_i.inspect_pkts.num);
  5451. DP_PRINT_STATS(" Bytes = %llu",
  5452. pdev->stats.tx_i.inspect_pkts.bytes);
  5453. DP_PRINT_STATS("Nawds Multicast:");
  5454. DP_PRINT_STATS(" Packets = %d",
  5455. pdev->stats.tx_i.nawds_mcast.num);
  5456. DP_PRINT_STATS(" Bytes = %llu",
  5457. pdev->stats.tx_i.nawds_mcast.bytes);
  5458. DP_PRINT_STATS("CCE Classified:");
  5459. DP_PRINT_STATS(" CCE Classified Packets: %u",
  5460. pdev->stats.tx_i.cce_classified);
  5461. DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
  5462. pdev->stats.tx_i.cce_classified_raw);
  5463. DP_PRINT_STATS("Mesh stats:");
  5464. DP_PRINT_STATS(" frames to firmware: %u",
  5465. pdev->stats.tx_i.mesh.exception_fw);
  5466. DP_PRINT_STATS(" completions from fw: %u",
  5467. pdev->stats.tx_i.mesh.completion_fw);
  5468. DP_PRINT_STATS("PPDU stats counter");
  5469. for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
  5470. DP_PRINT_STATS(" Tag[%d] = %llu", index,
  5471. pdev->stats.ppdu_stats_counter[index]);
  5472. }
  5473. }
  5474. /**
  5475. * dp_print_pdev_rx_stats(): Print Pdev level RX stats
  5476. * @pdev: DP_PDEV Handle
  5477. *
  5478. * Return: void
  5479. */
  5480. static inline void
  5481. dp_print_pdev_rx_stats(struct dp_pdev *pdev)
  5482. {
  5483. DP_PRINT_STATS("PDEV Rx Stats:\n");
  5484. DP_PRINT_STATS("Received From HW (Per Rx Ring):");
  5485. DP_PRINT_STATS(" Packets = %d %d %d %d",
  5486. pdev->stats.rx.rcvd_reo[0].num,
  5487. pdev->stats.rx.rcvd_reo[1].num,
  5488. pdev->stats.rx.rcvd_reo[2].num,
  5489. pdev->stats.rx.rcvd_reo[3].num);
  5490. DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
  5491. pdev->stats.rx.rcvd_reo[0].bytes,
  5492. pdev->stats.rx.rcvd_reo[1].bytes,
  5493. pdev->stats.rx.rcvd_reo[2].bytes,
  5494. pdev->stats.rx.rcvd_reo[3].bytes);
  5495. DP_PRINT_STATS("Replenished:");
  5496. DP_PRINT_STATS(" Packets = %d",
  5497. pdev->stats.replenish.pkts.num);
  5498. DP_PRINT_STATS(" Bytes = %llu",
  5499. pdev->stats.replenish.pkts.bytes);
  5500. DP_PRINT_STATS(" Buffers Added To Freelist = %d",
  5501. pdev->stats.buf_freelist);
  5502. DP_PRINT_STATS(" Low threshold intr = %d",
  5503. pdev->stats.replenish.low_thresh_intrs);
  5504. DP_PRINT_STATS("Dropped:");
  5505. DP_PRINT_STATS(" msdu_not_done = %d",
  5506. pdev->stats.dropped.msdu_not_done);
  5507. DP_PRINT_STATS(" mon_rx_drop = %d",
  5508. pdev->stats.dropped.mon_rx_drop);
  5509. DP_PRINT_STATS(" mec_drop = %d",
  5510. pdev->stats.rx.mec_drop.num);
  5511. DP_PRINT_STATS(" Bytes = %llu",
  5512. pdev->stats.rx.mec_drop.bytes);
  5513. DP_PRINT_STATS("Sent To Stack:");
  5514. DP_PRINT_STATS(" Packets = %d",
  5515. pdev->stats.rx.to_stack.num);
  5516. DP_PRINT_STATS(" Bytes = %llu",
  5517. pdev->stats.rx.to_stack.bytes);
  5518. DP_PRINT_STATS("Multicast/Broadcast:");
  5519. DP_PRINT_STATS(" Packets = %d",
  5520. pdev->stats.rx.multicast.num);
  5521. DP_PRINT_STATS(" Bytes = %llu",
  5522. pdev->stats.rx.multicast.bytes);
  5523. DP_PRINT_STATS("Errors:");
  5524. DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d",
  5525. pdev->stats.replenish.rxdma_err);
  5526. DP_PRINT_STATS(" Desc Alloc Failed: = %d",
  5527. pdev->stats.err.desc_alloc_fail);
  5528. DP_PRINT_STATS(" IP checksum error = %d",
  5529. pdev->stats.err.ip_csum_err);
  5530. DP_PRINT_STATS(" TCP/UDP checksum error = %d",
  5531. pdev->stats.err.tcp_udp_csum_err);
  5532. /* Get bar_recv_cnt */
  5533. dp_aggregate_pdev_ctrl_frames_stats(pdev);
  5534. DP_PRINT_STATS("BAR Received Count: = %d",
  5535. pdev->stats.rx.bar_recv_cnt);
  5536. }
  5537. /**
  5538. * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
  5539. * @pdev: DP_PDEV Handle
  5540. *
  5541. * Return: void
  5542. */
  5543. static inline void
  5544. dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
  5545. {
  5546. struct cdp_pdev_mon_stats *rx_mon_stats;
  5547. rx_mon_stats = &pdev->rx_mon_stats;
  5548. DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
  5549. dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
  5550. DP_PRINT_STATS("status_ppdu_done_cnt = %d",
  5551. rx_mon_stats->status_ppdu_done);
  5552. DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
  5553. rx_mon_stats->dest_ppdu_done);
  5554. DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
  5555. rx_mon_stats->dest_mpdu_done);
  5556. DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
  5557. rx_mon_stats->dest_mpdu_drop);
  5558. DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
  5559. rx_mon_stats->dup_mon_linkdesc_cnt);
  5560. DP_PRINT_STATS("dup_mon_buf_cnt = %d",
  5561. rx_mon_stats->dup_mon_buf_cnt);
  5562. }
  5563. /**
  5564. * dp_print_soc_tx_stats(): Print SOC level stats
5565. * @soc: DP_SOC Handle
  5566. *
  5567. * Return: void
  5568. */
  5569. static inline void
  5570. dp_print_soc_tx_stats(struct dp_soc *soc)
  5571. {
  5572. uint8_t desc_pool_id;
  5573. soc->stats.tx.desc_in_use = 0;
  5574. DP_PRINT_STATS("SOC Tx Stats:\n");
  5575. for (desc_pool_id = 0;
  5576. desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5577. desc_pool_id++)
  5578. soc->stats.tx.desc_in_use +=
  5579. soc->tx_desc[desc_pool_id].num_allocated;
  5580. DP_PRINT_STATS("Tx Descriptors In Use = %d",
  5581. soc->stats.tx.desc_in_use);
  5582. DP_PRINT_STATS("Invalid peer:");
  5583. DP_PRINT_STATS(" Packets = %d",
  5584. soc->stats.tx.tx_invalid_peer.num);
  5585. DP_PRINT_STATS(" Bytes = %llu",
  5586. soc->stats.tx.tx_invalid_peer.bytes);
  5587. DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
  5588. soc->stats.tx.tcl_ring_full[0],
  5589. soc->stats.tx.tcl_ring_full[1],
  5590. soc->stats.tx.tcl_ring_full[2]);
  5591. }
  5592. /**
  5593. * dp_print_soc_rx_stats: Print SOC level Rx stats
  5594. * @soc: DP_SOC Handle
  5595. *
  5596. * Return:void
  5597. */
  5598. static inline void
  5599. dp_print_soc_rx_stats(struct dp_soc *soc)
  5600. {
  5601. uint32_t i;
  5602. char reo_error[DP_REO_ERR_LENGTH];
  5603. char rxdma_error[DP_RXDMA_ERR_LENGTH];
  5604. uint8_t index = 0;
  5605. DP_PRINT_STATS("SOC Rx Stats:\n");
  5606. DP_PRINT_STATS("Fragmented packets: %u",
  5607. soc->stats.rx.rx_frags);
  5608. DP_PRINT_STATS("Reo reinjected packets: %u",
  5609. soc->stats.rx.reo_reinject);
  5610. DP_PRINT_STATS("Errors:\n");
  5611. DP_PRINT_STATS("Rx Decrypt Errors = %d",
  5612. (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
  5613. soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
  5614. DP_PRINT_STATS("Invalid RBM = %d",
  5615. soc->stats.rx.err.invalid_rbm);
  5616. DP_PRINT_STATS("Invalid Vdev = %d",
  5617. soc->stats.rx.err.invalid_vdev);
  5618. DP_PRINT_STATS("Invalid Pdev = %d",
  5619. soc->stats.rx.err.invalid_pdev);
  5620. DP_PRINT_STATS("Invalid Peer = %d",
  5621. soc->stats.rx.err.rx_invalid_peer.num);
  5622. DP_PRINT_STATS("HAL Ring Access Fail = %d",
  5623. soc->stats.rx.err.hal_ring_access_fail);
  5624. DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
  5625. DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
  5626. DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
  5627. DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
  5628. for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
  5629. index += qdf_snprint(&rxdma_error[index],
  5630. DP_RXDMA_ERR_LENGTH - index,
  5631. " %d", soc->stats.rx.err.rxdma_error[i]);
  5632. }
  5633. DP_PRINT_STATS("RXDMA Error (0-31):%s",
  5634. rxdma_error);
  5635. index = 0;
  5636. for (i = 0; i < HAL_REO_ERR_MAX; i++) {
  5637. index += qdf_snprint(&reo_error[index],
  5638. DP_REO_ERR_LENGTH - index,
  5639. " %d", soc->stats.rx.err.reo_error[i]);
  5640. }
  5641. DP_PRINT_STATS("REO Error(0-14):%s",
  5642. reo_error);
  5643. }
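/*
 * Editor's note: the error arrays above are flattened into one line
 * with an accumulating write offset -- each qdf_snprint() returns the
 * number of characters it wrote, which advances the index into a
 * fixed buffer. A standalone sketch of the idiom with snprintf(),
 * which qdf_snprint is assumed to resemble:
 */
#include <stdio.h>

#define EX_BUF_LEN 64	/* stand-in for DP_RXDMA_ERR_LENGTH */

int main(void)
{
	int err[4] = { 0, 2, 0, 5 };
	char buf[EX_BUF_LEN];
	int index = 0;
	int i;

	for (i = 0; i < 4; i++)
		index += snprintf(&buf[index], EX_BUF_LEN - index,
				  " %d", err[i]);
	printf("errors (0-3):%s\n", buf);
	return 0;
}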
  5644. /**
5645. * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
5646. * @ring_type: Ring type
  5647. *
  5648. * Return: char const pointer
  5649. */
  5650. static inline const
  5651. char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
  5652. {
  5653. switch (ring_type) {
  5654. case REO_DST:
  5655. return "Reo_dst";
  5656. case REO_EXCEPTION:
  5657. return "Reo_exception";
  5658. case REO_CMD:
  5659. return "Reo_cmd";
  5660. case REO_REINJECT:
  5661. return "Reo_reinject";
  5662. case REO_STATUS:
  5663. return "Reo_status";
  5664. case WBM2SW_RELEASE:
  5665. return "wbm2sw_release";
  5666. case TCL_DATA:
  5667. return "tcl_data";
  5668. case TCL_CMD:
  5669. return "tcl_cmd";
  5670. case TCL_STATUS:
  5671. return "tcl_status";
  5672. case SW2WBM_RELEASE:
  5673. return "sw2wbm_release";
  5674. case RXDMA_BUF:
  5675. return "Rxdma_buf";
  5676. case RXDMA_DST:
  5677. return "Rxdma_dst";
  5678. case RXDMA_MONITOR_BUF:
  5679. return "Rxdma_monitor_buf";
  5680. case RXDMA_MONITOR_DESC:
  5681. return "Rxdma_monitor_desc";
  5682. case RXDMA_MONITOR_STATUS:
  5683. return "Rxdma_monitor_status";
  5684. default:
  5685. dp_err("Invalid ring type");
  5686. break;
  5687. }
  5688. return "Invalid";
  5689. }
  5690. /**
  5691. * dp_print_ring_stat_from_hal(): Print hal level ring stats
  5692. * @soc: DP_SOC handle
  5693. * @srng: DP_SRNG handle
5695. * @ring_type: srng ring type (the ring name is derived from it)
  5696. *
  5697. * Return: void
  5698. */
  5699. static void
  5700. dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
  5701. enum hal_ring_type ring_type)
  5702. {
  5703. uint32_t tailp;
  5704. uint32_t headp;
  5705. int32_t hw_headp = -1;
  5706. int32_t hw_tailp = -1;
  5707. const char *ring_name;
  5708. struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
  5709. if (soc && srng && srng->hal_srng) {
  5710. ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
  5711. hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
  5712. DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
  5713. ring_name, headp, tailp);
  5714. hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
  5715. &hw_tailp, ring_type);
  5716. DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
  5717. ring_name, hw_headp, hw_tailp);
  5718. }
  5719. }
  5720. /**
5721. * dp_print_mon_ring_stat_from_hal() - Print stats for monitor rings based
  5722. * on target
  5723. * @pdev: physical device handle
  5724. * @mac_id: mac id
  5725. *
  5726. * Return: void
  5727. */
  5728. static inline
  5729. void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
  5730. {
  5731. if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
  5732. dp_print_ring_stat_from_hal(pdev->soc,
  5733. &pdev->rxdma_mon_buf_ring[mac_id],
  5734. RXDMA_MONITOR_BUF);
  5735. dp_print_ring_stat_from_hal(pdev->soc,
  5736. &pdev->rxdma_mon_dst_ring[mac_id],
  5737. RXDMA_MONITOR_DST);
  5738. dp_print_ring_stat_from_hal(pdev->soc,
  5739. &pdev->rxdma_mon_desc_ring[mac_id],
  5740. RXDMA_MONITOR_DESC);
  5741. }
  5742. dp_print_ring_stat_from_hal(pdev->soc,
  5743. &pdev->rxdma_mon_status_ring[mac_id],
  5744. RXDMA_MONITOR_STATUS);
  5745. }
  5746. /**
  5747. * dp_print_ring_stats(): Print tail and head pointer
  5748. * @pdev: DP_PDEV handle
  5749. *
  5750. * Return:void
  5751. */
  5752. static inline void
  5753. dp_print_ring_stats(struct dp_pdev *pdev)
  5754. {
  5755. uint32_t i;
  5756. int mac_id;
  5757. dp_print_ring_stat_from_hal(pdev->soc,
  5758. &pdev->soc->reo_exception_ring,
  5759. REO_EXCEPTION);
  5760. dp_print_ring_stat_from_hal(pdev->soc,
  5761. &pdev->soc->reo_reinject_ring,
  5762. REO_REINJECT);
  5763. dp_print_ring_stat_from_hal(pdev->soc,
  5764. &pdev->soc->reo_cmd_ring,
  5765. REO_CMD);
  5766. dp_print_ring_stat_from_hal(pdev->soc,
  5767. &pdev->soc->reo_status_ring,
  5768. REO_STATUS);
  5769. dp_print_ring_stat_from_hal(pdev->soc,
  5770. &pdev->soc->rx_rel_ring,
  5771. WBM2SW_RELEASE);
  5772. dp_print_ring_stat_from_hal(pdev->soc,
  5773. &pdev->soc->tcl_cmd_ring,
  5774. TCL_CMD);
  5775. dp_print_ring_stat_from_hal(pdev->soc,
  5776. &pdev->soc->tcl_status_ring,
  5777. TCL_STATUS);
  5778. dp_print_ring_stat_from_hal(pdev->soc,
  5779. &pdev->soc->wbm_desc_rel_ring,
  5780. SW2WBM_RELEASE);
  5781. for (i = 0; i < MAX_REO_DEST_RINGS; i++)
  5782. dp_print_ring_stat_from_hal(pdev->soc,
  5783. &pdev->soc->reo_dest_ring[i],
  5784. REO_DST);
  5785. for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
  5786. dp_print_ring_stat_from_hal(pdev->soc,
  5787. &pdev->soc->tcl_data_ring[i],
  5788. TCL_DATA);
  5789. for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
  5790. dp_print_ring_stat_from_hal(pdev->soc,
  5791. &pdev->soc->tx_comp_ring[i],
  5792. WBM2SW_RELEASE);
  5793. dp_print_ring_stat_from_hal(pdev->soc,
  5794. &pdev->rx_refill_buf_ring,
  5795. RXDMA_BUF);
  5796. dp_print_ring_stat_from_hal(pdev->soc,
  5797. &pdev->rx_refill_buf_ring2,
  5798. RXDMA_BUF);
  5799. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  5800. dp_print_ring_stat_from_hal(pdev->soc,
  5801. &pdev->rx_mac_buf_ring[i],
  5802. RXDMA_BUF);
  5803. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
  5804. dp_print_mon_ring_stat_from_hal(pdev, mac_id);
  5805. for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
  5806. dp_print_ring_stat_from_hal(pdev->soc,
  5807. &pdev->rxdma_err_dst_ring[i],
  5808. RXDMA_DST);
  5809. }
  5810. /**
  5811. * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
  5812. * @vdev: DP_VDEV handle
  5813. *
  5814. * Return:void
  5815. */
  5816. static inline void
  5817. dp_txrx_host_stats_clr(struct dp_vdev *vdev)
  5818. {
  5819. struct dp_peer *peer = NULL;
  5820. DP_STATS_CLR(vdev->pdev);
  5821. DP_STATS_CLR(vdev->pdev->soc);
  5822. DP_STATS_CLR(vdev);
  5823. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  5824. if (!peer)
  5825. return;
  5826. DP_STATS_CLR(peer);
  5827. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5828. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5829. &peer->stats, peer->peer_ids[0],
  5830. UPDATE_PEER_STATS, vdev->pdev->pdev_id);
  5831. #endif
  5832. }
  5833. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  5834. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  5835. &vdev->stats, vdev->vdev_id,
  5836. UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
  5837. #endif
  5838. }
  5839. /**
  5840. * dp_print_common_rates_info(): Print common rate for tx or rx
  5841. * @pkt_type_array: rate type array contains rate info
  5842. *
  5843. * Return:void
  5844. */
  5845. static inline void
  5846. dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
  5847. {
  5848. uint8_t mcs, pkt_type;
  5849. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  5850. for (mcs = 0; mcs < MAX_MCS; mcs++) {
  5851. if (!dp_rate_string[pkt_type][mcs].valid)
  5852. continue;
  5853. DP_PRINT_STATS(" %s = %d",
  5854. dp_rate_string[pkt_type][mcs].mcs_type,
  5855. pkt_type_array[pkt_type].mcs_count[mcs]);
  5856. }
  5857. DP_PRINT_STATS("\n");
  5858. }
  5859. }
  5860. /**
  5861. * dp_print_rx_rates(): Print Rx rate stats
  5862. * @vdev: DP_VDEV handle
  5863. *
  5864. * Return:void
  5865. */
  5866. static inline void
  5867. dp_print_rx_rates(struct dp_vdev *vdev)
  5868. {
  5869. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  5870. uint8_t i;
  5871. uint8_t index = 0;
  5872. char nss[DP_NSS_LENGTH];
  5873. DP_PRINT_STATS("Rx Rate Info:\n");
  5874. dp_print_common_rates_info(pdev->stats.rx.pkt_type);
  5875. index = 0;
  5876. for (i = 0; i < SS_COUNT; i++) {
  5877. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  5878. " %d", pdev->stats.rx.nss[i]);
  5879. }
  5880. DP_PRINT_STATS("NSS(1-8) = %s",
  5881. nss);
  5882. DP_PRINT_STATS("SGI ="
  5883. " 0.8us %d,"
  5884. " 0.4us %d,"
  5885. " 1.6us %d,"
  5886. " 3.2us %d,",
  5887. pdev->stats.rx.sgi_count[0],
  5888. pdev->stats.rx.sgi_count[1],
  5889. pdev->stats.rx.sgi_count[2],
  5890. pdev->stats.rx.sgi_count[3]);
  5891. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  5892. pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
  5893. pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
  5894. DP_PRINT_STATS("Reception Type ="
  5895. " SU: %d,"
  5896. " MU_MIMO:%d,"
  5897. " MU_OFDMA:%d,"
  5898. " MU_OFDMA_MIMO:%d\n",
  5899. pdev->stats.rx.reception_type[0],
  5900. pdev->stats.rx.reception_type[1],
  5901. pdev->stats.rx.reception_type[2],
  5902. pdev->stats.rx.reception_type[3]);
  5903. DP_PRINT_STATS("Aggregation:\n");
  5904. DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
  5905. pdev->stats.rx.ampdu_cnt);
  5906. DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
  5907. pdev->stats.rx.non_ampdu_cnt);
  5908. DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
  5909. pdev->stats.rx.amsdu_cnt);
  5910. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
  5911. pdev->stats.rx.non_amsdu_cnt);
  5912. }
  5913. /**
  5914. * dp_print_tx_rates(): Print tx rates
  5915. * @vdev: DP_VDEV handle
  5916. *
  5917. * Return:void
  5918. */
  5919. static inline void
  5920. dp_print_tx_rates(struct dp_vdev *vdev)
  5921. {
  5922. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  5923. uint8_t index;
  5924. char nss[DP_NSS_LENGTH];
  5925. int nss_index;
  5926. DP_PRINT_STATS("Tx Rate Info:\n");
  5927. dp_print_common_rates_info(pdev->stats.tx.pkt_type);
  5928. DP_PRINT_STATS("SGI ="
  5929. " 0.8us %d"
  5930. " 0.4us %d"
  5931. " 1.6us %d"
  5932. " 3.2us %d",
  5933. pdev->stats.tx.sgi_count[0],
  5934. pdev->stats.tx.sgi_count[1],
  5935. pdev->stats.tx.sgi_count[2],
  5936. pdev->stats.tx.sgi_count[3]);
  5937. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  5938. pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
  5939. pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
  5940. index = 0;
  5941. for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
  5942. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  5943. " %d", pdev->stats.tx.nss[nss_index]);
  5944. }
  5945. DP_PRINT_STATS("NSS(1-8) = %s", nss);
  5946. DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
  5947. DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
  5948. DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
  5949. DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
  5950. DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
  5951. DP_PRINT_STATS("Aggregation:\n");
  5952. DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
  5953. pdev->stats.tx.amsdu_cnt);
  5954. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
  5955. pdev->stats.tx.non_amsdu_cnt);
  5956. }
  5957. /**
  5958. * dp_print_peer_stats():print peer stats
  5959. * @peer: DP_PEER handle
  5960. *
  5961. * return void
  5962. */
  5963. static inline void dp_print_peer_stats(struct dp_peer *peer)
  5964. {
  5965. uint8_t i;
  5966. uint32_t index;
  5967. char nss[DP_NSS_LENGTH];
  5968. DP_PRINT_STATS("Node Tx Stats:\n");
  5969. DP_PRINT_STATS("Total Packet Completions = %d",
  5970. peer->stats.tx.comp_pkt.num);
  5971. DP_PRINT_STATS("Total Bytes Completions = %llu",
  5972. peer->stats.tx.comp_pkt.bytes);
  5973. DP_PRINT_STATS("Success Packets = %d",
  5974. peer->stats.tx.tx_success.num);
  5975. DP_PRINT_STATS("Success Bytes = %llu",
  5976. peer->stats.tx.tx_success.bytes);
  5977. DP_PRINT_STATS("Unicast Success Packets = %d",
  5978. peer->stats.tx.ucast.num);
  5979. DP_PRINT_STATS("Unicast Success Bytes = %llu",
  5980. peer->stats.tx.ucast.bytes);
  5981. DP_PRINT_STATS("Multicast Success Packets = %d",
  5982. peer->stats.tx.mcast.num);
  5983. DP_PRINT_STATS("Multicast Success Bytes = %llu",
  5984. peer->stats.tx.mcast.bytes);
  5985. DP_PRINT_STATS("Broadcast Success Packets = %d",
  5986. peer->stats.tx.bcast.num);
  5987. DP_PRINT_STATS("Broadcast Success Bytes = %llu",
  5988. peer->stats.tx.bcast.bytes);
  5989. DP_PRINT_STATS("Packets Failed = %d",
  5990. peer->stats.tx.tx_failed);
  5991. DP_PRINT_STATS("Packets In OFDMA = %d",
  5992. peer->stats.tx.ofdma);
  5993. DP_PRINT_STATS("Packets In STBC = %d",
  5994. peer->stats.tx.stbc);
  5995. DP_PRINT_STATS("Packets In LDPC = %d",
  5996. peer->stats.tx.ldpc);
  5997. DP_PRINT_STATS("Packet Retries = %d",
  5998. peer->stats.tx.retries);
  5999. DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
  6000. peer->stats.tx.amsdu_cnt);
  6001. DP_PRINT_STATS("Last Packet RSSI = %d",
  6002. peer->stats.tx.last_ack_rssi);
  6003. DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
  6004. peer->stats.tx.dropped.fw_rem.num);
  6005. DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
  6006. peer->stats.tx.dropped.fw_rem.bytes);
  6007. DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
  6008. peer->stats.tx.dropped.fw_rem_tx);
  6009. DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
  6010. peer->stats.tx.dropped.fw_rem_notx);
  6011. DP_PRINT_STATS("Dropped : Age Out = %d",
  6012. peer->stats.tx.dropped.age_out);
  6013. DP_PRINT_STATS("NAWDS : ");
  6014. DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
  6015. peer->stats.tx.nawds_mcast_drop);
  6016. DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
  6017. peer->stats.tx.nawds_mcast.num);
  6018. DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
  6019. peer->stats.tx.nawds_mcast.bytes);
  6020. DP_PRINT_STATS("Rate Info:");
  6021. dp_print_common_rates_info(peer->stats.tx.pkt_type);
  6022. DP_PRINT_STATS("SGI = "
  6023. " 0.8us %d"
  6024. " 0.4us %d"
  6025. " 1.6us %d"
  6026. " 3.2us %d",
  6027. peer->stats.tx.sgi_count[0],
  6028. peer->stats.tx.sgi_count[1],
  6029. peer->stats.tx.sgi_count[2],
  6030. peer->stats.tx.sgi_count[3]);
  6031. DP_PRINT_STATS("Excess Retries per AC ");
  6032. DP_PRINT_STATS(" Best effort = %d",
  6033. peer->stats.tx.excess_retries_per_ac[0]);
  6034. DP_PRINT_STATS(" Background= %d",
  6035. peer->stats.tx.excess_retries_per_ac[1]);
  6036. DP_PRINT_STATS(" Video = %d",
  6037. peer->stats.tx.excess_retries_per_ac[2]);
  6038. DP_PRINT_STATS(" Voice = %d",
  6039. peer->stats.tx.excess_retries_per_ac[3]);
  6040. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
  6041. peer->stats.tx.bw[0], peer->stats.tx.bw[1],
  6042. peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
  6043. index = 0;
  6044. for (i = 0; i < SS_COUNT; i++) {
  6045. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6046. " %d", peer->stats.tx.nss[i]);
  6047. }
  6048. DP_PRINT_STATS("NSS(1-8) = %s",
  6049. nss);
  6050. DP_PRINT_STATS("Aggregation:");
  6051. DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
  6052. peer->stats.tx.amsdu_cnt);
  6053. DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
  6054. peer->stats.tx.non_amsdu_cnt);
  6055. DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
  6056. DP_PRINT_STATS(" Bytes transmitted in last sec: %d",
  6057. peer->stats.tx.tx_byte_rate);
  6058. DP_PRINT_STATS(" Data transmitted in last sec: %d",
  6059. peer->stats.tx.tx_data_rate);
  6060. DP_PRINT_STATS("Node Rx Stats:");
  6061. DP_PRINT_STATS("Packets Sent To Stack = %d",
  6062. peer->stats.rx.to_stack.num);
  6063. DP_PRINT_STATS("Bytes Sent To Stack = %llu",
  6064. peer->stats.rx.to_stack.bytes);
  6065. for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
  6066. DP_PRINT_STATS("Ring Id = %d", i);
  6067. DP_PRINT_STATS(" Packets Received = %d",
  6068. peer->stats.rx.rcvd_reo[i].num);
  6069. DP_PRINT_STATS(" Bytes Received = %llu",
  6070. peer->stats.rx.rcvd_reo[i].bytes);
  6071. }
  6072. DP_PRINT_STATS("Multicast Packets Received = %d",
  6073. peer->stats.rx.multicast.num);
  6074. DP_PRINT_STATS("Multicast Bytes Received = %llu",
  6075. peer->stats.rx.multicast.bytes);
  6076. DP_PRINT_STATS("Broadcast Packets Received = %d",
  6077. peer->stats.rx.bcast.num);
  6078. DP_PRINT_STATS("Broadcast Bytes Received = %llu",
  6079. peer->stats.rx.bcast.bytes);
  6080. DP_PRINT_STATS("Intra BSS Packets Received = %d",
  6081. peer->stats.rx.intra_bss.pkts.num);
  6082. DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
  6083. peer->stats.rx.intra_bss.pkts.bytes);
  6084. DP_PRINT_STATS("Raw Packets Received = %d",
  6085. peer->stats.rx.raw.num);
  6086. DP_PRINT_STATS("Raw Bytes Received = %llu",
  6087. peer->stats.rx.raw.bytes);
  6088. DP_PRINT_STATS("Errors: MIC Errors = %d",
  6089. peer->stats.rx.err.mic_err);
  6090. DP_PRINT_STATS("Erros: Decryption Errors = %d",
  6091. peer->stats.rx.err.decrypt_err);
  6092. DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
  6093. peer->stats.rx.non_ampdu_cnt);
  6094. DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
  6095. peer->stats.rx.ampdu_cnt);
  6096. DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
  6097. peer->stats.rx.non_amsdu_cnt);
  6098. DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
  6099. peer->stats.rx.amsdu_cnt);
  6100. DP_PRINT_STATS("NAWDS : ");
  6101. DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
  6102. peer->stats.rx.nawds_mcast_drop);
  6103. DP_PRINT_STATS("SGI ="
  6104. " 0.8us %d"
  6105. " 0.4us %d"
  6106. " 1.6us %d"
  6107. " 3.2us %d",
  6108. peer->stats.rx.sgi_count[0],
  6109. peer->stats.rx.sgi_count[1],
  6110. peer->stats.rx.sgi_count[2],
  6111. peer->stats.rx.sgi_count[3]);
  6112. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
  6113. peer->stats.rx.bw[0], peer->stats.rx.bw[1],
  6114. peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
  6115. DP_PRINT_STATS("Reception Type ="
  6116. " SU %d,"
  6117. " MU_MIMO %d,"
  6118. " MU_OFDMA %d,"
  6119. " MU_OFDMA_MIMO %d",
  6120. peer->stats.rx.reception_type[0],
  6121. peer->stats.rx.reception_type[1],
  6122. peer->stats.rx.reception_type[2],
  6123. peer->stats.rx.reception_type[3]);
  6124. dp_print_common_rates_info(peer->stats.rx.pkt_type);
  6125. index = 0;
  6126. for (i = 0; i < SS_COUNT; i++) {
  6127. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  6128. " %d", peer->stats.rx.nss[i]);
  6129. }
  6130. DP_PRINT_STATS("NSS(1-8) = %s",
  6131. nss);
  6132. DP_PRINT_STATS("Aggregation:");
  6133. DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
  6134. peer->stats.rx.ampdu_cnt);
  6135. DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
  6136. peer->stats.rx.non_ampdu_cnt);
  6137. DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
  6138. peer->stats.rx.amsdu_cnt);
  6139. DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
  6140. peer->stats.rx.non_amsdu_cnt);
  6141. DP_PRINT_STATS("Bytes and Packets received in last one sec:");
  6142. DP_PRINT_STATS(" Bytes received in last sec: %d",
  6143. peer->stats.rx.rx_byte_rate);
  6144. DP_PRINT_STATS(" Data received in last sec: %d",
  6145. peer->stats.rx.rx_data_rate);
  6146. }
  6147. /*
6148. * dp_get_host_peer_stats() - function to print peer stats
  6149. * @pdev_handle: DP_PDEV handle
  6150. * @mac_addr: mac address of the peer
  6151. *
  6152. * Return: void
  6153. */
  6154. static void
  6155. dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
  6156. {
  6157. struct dp_peer *peer;
  6158. uint8_t local_id;
  6159. if (!mac_addr) {
  6160. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6161. "Invalid MAC address\n");
  6162. return;
  6163. }
  6164. peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
  6165. &local_id);
  6166. if (!peer) {
  6167. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  6168. "%s: Invalid peer\n", __func__);
  6169. return;
  6170. }
  6171. dp_print_peer_stats(peer);
  6172. dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
  6173. }
  6174. /**
6175. * dp_print_soc_cfg_params() - Dump soc wlan config parameters
6176. * @soc: DP SOC handle
  6177. *
  6178. * Return: void
  6179. */
  6180. static void
  6181. dp_print_soc_cfg_params(struct dp_soc *soc)
  6182. {
  6183. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  6184. uint8_t index = 0, i = 0;
  6185. char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
  6186. int num_of_int_contexts;
  6187. if (!soc) {
  6188. dp_err("Context is null");
  6189. return;
  6190. }
  6191. soc_cfg_ctx = soc->wlan_cfg_ctx;
  6192. if (!soc_cfg_ctx) {
  6193. dp_err("Context is null");
  6194. return;
  6195. }
  6196. num_of_int_contexts =
  6197. wlan_cfg_get_num_contexts(soc_cfg_ctx);
  6198. DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
  6199. soc_cfg_ctx->num_int_ctxts);
  6200. DP_TRACE_STATS(DEBUG, "Max clients: %u",
  6201. soc_cfg_ctx->max_clients);
  6202. DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
  6203. soc_cfg_ctx->max_alloc_size);
  6204. DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
  6205. soc_cfg_ctx->per_pdev_tx_ring);
  6206. DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
  6207. soc_cfg_ctx->num_tcl_data_rings);
  6208. DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
  6209. soc_cfg_ctx->per_pdev_rx_ring);
  6210. DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
  6211. soc_cfg_ctx->per_pdev_lmac_ring);
  6212. DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
  6213. soc_cfg_ctx->num_reo_dest_rings);
  6214. DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
  6215. soc_cfg_ctx->num_tx_desc_pool);
  6216. DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
  6217. soc_cfg_ctx->num_tx_ext_desc_pool);
  6218. DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
  6219. soc_cfg_ctx->num_tx_desc);
  6220. DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
  6221. soc_cfg_ctx->num_tx_ext_desc);
  6222. DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
  6223. soc_cfg_ctx->htt_packet_type);
  6224. DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
  6225. soc_cfg_ctx->max_peer_id);
  6226. DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
  6227. soc_cfg_ctx->tx_ring_size);
  6228. DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
  6229. soc_cfg_ctx->tx_comp_ring_size);
  6230. DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
  6231. soc_cfg_ctx->tx_comp_ring_size_nss);
  6232. DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
  6233. soc_cfg_ctx->int_batch_threshold_tx);
  6234. DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
  6235. soc_cfg_ctx->int_timer_threshold_tx);
  6236. DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
  6237. soc_cfg_ctx->int_batch_threshold_rx);
  6238. DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
  6239. soc_cfg_ctx->int_timer_threshold_rx);
  6240. DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
  6241. soc_cfg_ctx->int_batch_threshold_other);
  6242. DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
  6243. soc_cfg_ctx->int_timer_threshold_other);
  6244. for (i = 0; i < num_of_int_contexts; i++) {
  6245. index += qdf_snprint(&ring_mask[index],
  6246. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6247. " %d",
  6248. soc_cfg_ctx->int_tx_ring_mask[i]);
  6249. }
  6250. DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
  6251. num_of_int_contexts, ring_mask);
  6252. index = 0;
  6253. for (i = 0; i < num_of_int_contexts; i++) {
  6254. index += qdf_snprint(&ring_mask[index],
  6255. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6256. " %d",
  6257. soc_cfg_ctx->int_rx_ring_mask[i]);
  6258. }
  6259. DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
  6260. num_of_int_contexts, ring_mask);
  6261. index = 0;
  6262. for (i = 0; i < num_of_int_contexts; i++) {
  6263. index += qdf_snprint(&ring_mask[index],
  6264. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6265. " %d",
  6266. soc_cfg_ctx->int_rx_mon_ring_mask[i]);
  6267. }
  6268. DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
  6269. num_of_int_contexts, ring_mask);
  6270. index = 0;
  6271. for (i = 0; i < num_of_int_contexts; i++) {
  6272. index += qdf_snprint(&ring_mask[index],
  6273. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6274. " %d",
  6275. soc_cfg_ctx->int_rx_err_ring_mask[i]);
  6276. }
  6277. DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
  6278. num_of_int_contexts, ring_mask);
  6279. index = 0;
  6280. for (i = 0; i < num_of_int_contexts; i++) {
  6281. index += qdf_snprint(&ring_mask[index],
  6282. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6283. " %d",
  6284. soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
  6285. }
  6286. DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
  6287. num_of_int_contexts, ring_mask);
  6288. index = 0;
  6289. for (i = 0; i < num_of_int_contexts; i++) {
  6290. index += qdf_snprint(&ring_mask[index],
  6291. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6292. " %d",
  6293. soc_cfg_ctx->int_reo_status_ring_mask[i]);
  6294. }
  6295. DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
  6296. num_of_int_contexts, ring_mask);
  6297. index = 0;
  6298. for (i = 0; i < num_of_int_contexts; i++) {
  6299. index += qdf_snprint(&ring_mask[index],
  6300. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6301. " %d",
  6302. soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
  6303. }
  6304. DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
  6305. num_of_int_contexts, ring_mask);
  6306. index = 0;
  6307. for (i = 0; i < num_of_int_contexts; i++) {
  6308. index += qdf_snprint(&ring_mask[index],
  6309. DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
  6310. " %d",
  6311. soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
  6312. }
  6313. DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
  6314. num_of_int_contexts, ring_mask);
  6315. DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
  6316. soc_cfg_ctx->rx_hash);
  6317. DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
  6318. soc_cfg_ctx->tso_enabled);
  6319. DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
  6320. soc_cfg_ctx->lro_enabled);
  6321. DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
  6322. soc_cfg_ctx->sg_enabled);
  6323. DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
  6324. soc_cfg_ctx->gro_enabled);
  6325. DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
  6326. soc_cfg_ctx->rawmode_enabled);
  6327. DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
  6328. soc_cfg_ctx->peer_flow_ctrl_enabled);
  6329. DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
  6330. soc_cfg_ctx->napi_enabled);
  6331. DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
  6332. soc_cfg_ctx->tcp_udp_checksumoffload);
  6333. DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
  6334. soc_cfg_ctx->defrag_timeout_check);
  6335. DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
  6336. soc_cfg_ctx->rx_defrag_min_timeout);
  6337. DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
  6338. soc_cfg_ctx->wbm_release_ring);
  6339. DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
  6340. soc_cfg_ctx->tcl_cmd_ring);
  6341. DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
  6342. soc_cfg_ctx->tcl_status_ring);
  6343. DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
  6344. soc_cfg_ctx->reo_reinject_ring);
  6345. DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
  6346. soc_cfg_ctx->rx_release_ring);
  6347. DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
  6348. soc_cfg_ctx->reo_exception_ring);
  6349. DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
  6350. soc_cfg_ctx->reo_cmd_ring);
  6351. DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
  6352. soc_cfg_ctx->reo_status_ring);
  6353. DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
  6354. soc_cfg_ctx->rxdma_refill_ring);
  6355. DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
  6356. soc_cfg_ctx->rxdma_err_dst_ring);
  6357. }
  6358. /**
6359. * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
6360. * @pdev: DP pdev handle
6361. *
6362. * Return: void
  6363. */
  6364. static void
  6365. dp_print_pdev_cfg_params(struct dp_pdev *pdev)
  6366. {
  6367. struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
  6368. if (!pdev) {
  6369. dp_err("Context is null");
  6370. return;
  6371. }
  6372. pdev_cfg_ctx = pdev->wlan_cfg_ctx;
  6373. if (!pdev_cfg_ctx) {
  6374. dp_err("Context is null");
  6375. return;
  6376. }
  6377. DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
  6378. pdev_cfg_ctx->rx_dma_buf_ring_size);
  6379. DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
  6380. pdev_cfg_ctx->dma_mon_buf_ring_size);
  6381. DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
  6382. pdev_cfg_ctx->dma_mon_dest_ring_size);
  6383. DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
  6384. pdev_cfg_ctx->dma_mon_status_ring_size);
  6385. DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
  6386. pdev_cfg_ctx->rxdma_monitor_desc_ring);
  6387. DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
  6388. pdev_cfg_ctx->num_mac_rings);
  6389. }
  6390. /**
  6391. * dp_txrx_stats_help() - Helper function for Txrx_Stats
  6392. *
  6393. * Return: None
  6394. */
  6395. static void dp_txrx_stats_help(void)
  6396. {
  6397. dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
  6398. dp_info("stats_option:");
  6399. dp_info(" 1 -- HTT Tx Statistics");
  6400. dp_info(" 2 -- HTT Rx Statistics");
  6401. dp_info(" 3 -- HTT Tx HW Queue Statistics");
  6402. dp_info(" 4 -- HTT Tx HW Sched Statistics");
  6403. dp_info(" 5 -- HTT Error Statistics");
  6404. dp_info(" 6 -- HTT TQM Statistics");
  6405. dp_info(" 7 -- HTT TQM CMDQ Statistics");
  6406. dp_info(" 8 -- HTT TX_DE_CMN Statistics");
  6407. dp_info(" 9 -- HTT Tx Rate Statistics");
  6408. dp_info(" 10 -- HTT Rx Rate Statistics");
  6409. dp_info(" 11 -- HTT Peer Statistics");
  6410. dp_info(" 12 -- HTT Tx SelfGen Statistics");
  6411. dp_info(" 13 -- HTT Tx MU HWQ Statistics");
  6412. dp_info(" 14 -- HTT RING_IF_INFO Statistics");
  6413. dp_info(" 15 -- HTT SRNG Statistics");
  6414. dp_info(" 16 -- HTT SFM Info Statistics");
  6415. dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
  6416. dp_info(" 18 -- HTT Peer List Details");
  6417. dp_info(" 20 -- Clear Host Statistics");
  6418. dp_info(" 21 -- Host Rx Rate Statistics");
  6419. dp_info(" 22 -- Host Tx Rate Statistics");
  6420. dp_info(" 23 -- Host Tx Statistics");
  6421. dp_info(" 24 -- Host Rx Statistics");
  6422. dp_info(" 25 -- Host AST Statistics");
  6423. dp_info(" 26 -- Host SRNG PTR Statistics");
  6424. dp_info(" 27 -- Host Mon Statistics");
  6425. dp_info(" 28 -- Host REO Queue Statistics");
  6426. dp_info(" 29 -- Host Soc cfg param Statistics");
  6427. dp_info(" 30 -- Host pdev cfg param Statistics");
  6428. }
  6429. /**
  6430. * dp_print_host_stats()- Function to print the stats aggregated at host
  6431. * @vdev_handle: DP_VDEV handle
6432. * @req: stats request holding the host stats type
6433. *
6434. * Return: 0 on success; prints an error message on failure
  6435. */
  6436. static int
  6437. dp_print_host_stats(struct cdp_vdev *vdev_handle,
  6438. struct cdp_txrx_stats_req *req)
  6439. {
  6440. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6441. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  6442. enum cdp_host_txrx_stats type =
  6443. dp_stats_mapping_table[req->stats][STATS_HOST];
  6444. dp_aggregate_pdev_stats(pdev);
  6445. switch (type) {
  6446. case TXRX_CLEAR_STATS:
  6447. dp_txrx_host_stats_clr(vdev);
  6448. break;
  6449. case TXRX_RX_RATE_STATS:
  6450. dp_print_rx_rates(vdev);
  6451. break;
  6452. case TXRX_TX_RATE_STATS:
  6453. dp_print_tx_rates(vdev);
  6454. break;
  6455. case TXRX_TX_HOST_STATS:
  6456. dp_print_pdev_tx_stats(pdev);
  6457. dp_print_soc_tx_stats(pdev->soc);
  6458. break;
  6459. case TXRX_RX_HOST_STATS:
  6460. dp_print_pdev_rx_stats(pdev);
  6461. dp_print_soc_rx_stats(pdev->soc);
  6462. break;
  6463. case TXRX_AST_STATS:
  6464. dp_print_ast_stats(pdev->soc);
  6465. dp_print_peer_table(vdev);
  6466. break;
  6467. case TXRX_SRNG_PTR_STATS:
  6468. dp_print_ring_stats(pdev);
  6469. break;
  6470. case TXRX_RX_MON_STATS:
  6471. dp_print_pdev_rx_mon_stats(pdev);
  6472. break;
  6473. case TXRX_REO_QUEUE_STATS:
  6474. dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
  6475. break;
  6476. case TXRX_SOC_CFG_PARAMS:
  6477. dp_print_soc_cfg_params(pdev->soc);
  6478. break;
  6479. case TXRX_PDEV_CFG_PARAMS:
  6480. dp_print_pdev_cfg_params(pdev);
  6481. break;
  6482. default:
  6483. dp_info("Wrong Input For TxRx Host Stats");
  6484. dp_txrx_stats_help();
  6485. break;
  6486. }
  6487. return 0;
  6488. }
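/*
 * Editor's note: the host-stats entry point above translates the
 * generic request id into a host stats type through
 * dp_stats_mapping_table before dispatching. A standalone sketch of
 * that table-then-switch idiom; the table contents here are
 * illustrative, not the driver's real mapping.
 */
#include <stdio.h>

enum ex_host_stats { EX_HOST_NONE, EX_TX_HOST_STATS, EX_RX_HOST_STATS };

/* hypothetical request-id -> host-stats-type mapping */
static const enum ex_host_stats ex_mapping[] = {
	EX_HOST_NONE, EX_TX_HOST_STATS, EX_RX_HOST_STATS,
};

int main(void)
{
	unsigned req = 1;
	enum ex_host_stats type = ex_mapping[req];

	switch (type) {
	case EX_TX_HOST_STATS:
		printf("print tx host stats\n");
		break;
	case EX_RX_HOST_STATS:
		printf("print rx host stats\n");
		break;
	default:
		printf("unsupported request %u\n", req);
		break;
	}
	return 0;
}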
  6489. /*
  6490. * dp_ppdu_ring_reset()- Reset PPDU Stats ring
  6491. * @pdev: DP_PDEV handle
  6492. *
  6493. * Return: void
  6494. */
  6495. static void
  6496. dp_ppdu_ring_reset(struct dp_pdev *pdev)
  6497. {
  6498. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  6499. int mac_id;
  6500. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  6501. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6502. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6503. pdev->pdev_id);
  6504. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6505. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6506. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6507. }
  6508. }
  6509. /*
  6510. * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
  6511. * @pdev: DP_PDEV handle
  6512. *
  6513. * Return: void
  6514. */
  6515. static void
  6516. dp_ppdu_ring_cfg(struct dp_pdev *pdev)
  6517. {
  6518. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  6519. int mac_id;
  6520. htt_tlv_filter.mpdu_start = 1;
  6521. htt_tlv_filter.msdu_start = 0;
  6522. htt_tlv_filter.packet = 0;
  6523. htt_tlv_filter.msdu_end = 0;
  6524. htt_tlv_filter.mpdu_end = 0;
  6525. htt_tlv_filter.attention = 0;
  6526. htt_tlv_filter.ppdu_start = 1;
  6527. htt_tlv_filter.ppdu_end = 1;
  6528. htt_tlv_filter.ppdu_end_user_stats = 1;
  6529. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  6530. htt_tlv_filter.ppdu_end_status_done = 1;
  6531. htt_tlv_filter.enable_fp = 1;
  6532. htt_tlv_filter.enable_md = 0;
  6533. if (pdev->neighbour_peers_added &&
  6534. pdev->soc->hw_nac_monitor_support) {
  6535. htt_tlv_filter.enable_md = 1;
  6536. htt_tlv_filter.packet_header = 1;
  6537. }
  6538. if (pdev->mcopy_mode) {
  6539. htt_tlv_filter.packet_header = 1;
  6540. htt_tlv_filter.enable_mo = 1;
  6541. }
  6542. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  6543. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  6544. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  6545. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  6546. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  6547. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  6548. if (pdev->neighbour_peers_added &&
  6549. pdev->soc->hw_nac_monitor_support)
  6550. htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
  6551. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  6552. int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
  6553. pdev->pdev_id);
  6554. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
  6555. pdev->rxdma_mon_status_ring[mac_id].hal_srng,
  6556. RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  6557. }
  6558. }
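/*
 * Illustrative sketch (hypothetical helper): the cfg/reset pair above is
 * used bracket-style by features that need PPDU TLVs on the monitor
 * status ring, e.g. the enhanced stats enable/disable paths below.
 */
static void dp_example_ppdu_ring_toggle(struct dp_pdev *pdev, bool enable)
{
	if (enable)
		dp_ppdu_ring_cfg(pdev);		/* subscribe to PPDU TLVs */
	else
		dp_ppdu_ring_reset(pdev);	/* zeroed filter stops TLV delivery */
}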
  6559. /*
6560. * is_ppdu_txrx_capture_enabled() - API to check whether any of the pktlog,
6561. * tx sniffer or mcopy capture modes is active.
6562. * @pdev: dp pdev handle.
6563. *
6564. * Return: true if none of the capture modes is enabled, false otherwise.
  6565. */
  6566. static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
  6567. {
  6568. if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
  6569. !pdev->mcopy_mode)
  6570. return true;
  6571. else
  6572. return false;
  6573. }
  6574. /*
6575. * dp_set_bpr_enable() - API to enable/disable bpr feature
6576. * @pdev_handle: DP_PDEV handle.
6577. * @val: CDP_BPR_ENABLE or CDP_BPR_DISABLE.
6578. *
6579. * Return: 0 for success. nonzero for failure.
  6580. */
  6581. static QDF_STATUS
  6582. dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
  6583. {
  6584. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6585. switch (val) {
  6586. case CDP_BPR_DISABLE:
  6587. pdev->bpr_enable = CDP_BPR_DISABLE;
  6588. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6589. !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
  6590. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6591. } else if (pdev->enhanced_stats_en &&
  6592. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6593. !pdev->pktlog_ppdu_stats) {
  6594. dp_h2t_cfg_stats_msg_send(pdev,
  6595. DP_PPDU_STATS_CFG_ENH_STATS,
  6596. pdev->pdev_id);
  6597. }
  6598. break;
  6599. case CDP_BPR_ENABLE:
  6600. pdev->bpr_enable = CDP_BPR_ENABLE;
  6601. if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
  6602. !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
  6603. dp_h2t_cfg_stats_msg_send(pdev,
  6604. DP_PPDU_STATS_CFG_BPR,
  6605. pdev->pdev_id);
  6606. } else if (pdev->enhanced_stats_en &&
  6607. !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
  6608. !pdev->pktlog_ppdu_stats) {
  6609. dp_h2t_cfg_stats_msg_send(pdev,
  6610. DP_PPDU_STATS_CFG_BPR_ENH,
  6611. pdev->pdev_id);
  6612. } else if (pdev->pktlog_ppdu_stats) {
  6613. dp_h2t_cfg_stats_msg_send(pdev,
  6614. DP_PPDU_STATS_CFG_BPR_PKTLOG,
  6615. pdev->pdev_id);
  6616. }
  6617. break;
  6618. default:
  6619. break;
  6620. }
  6621. return QDF_STATUS_SUCCESS;
  6622. }
  6623. /*
  6624. * dp_config_debug_sniffer()- API to enable/disable debug sniffer
  6625. * @pdev_handle: DP_PDEV handle
  6626. * @val: user provided value
  6627. *
  6628. * Return: 0 for success. nonzero for failure.
  6629. */
  6630. static QDF_STATUS
  6631. dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
  6632. {
  6633. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6634. QDF_STATUS status = QDF_STATUS_SUCCESS;
  6635. if (pdev->mcopy_mode)
  6636. dp_reset_monitor_mode(pdev_handle);
  6637. switch (val) {
  6638. case 0:
  6639. pdev->tx_sniffer_enable = 0;
  6640. pdev->mcopy_mode = 0;
  6641. if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
  6642. !pdev->bpr_enable) {
  6643. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6644. dp_ppdu_ring_reset(pdev);
  6645. } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
  6646. dp_h2t_cfg_stats_msg_send(pdev,
  6647. DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6648. } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
  6649. dp_h2t_cfg_stats_msg_send(pdev,
  6650. DP_PPDU_STATS_CFG_BPR_ENH,
  6651. pdev->pdev_id);
  6652. } else {
  6653. dp_h2t_cfg_stats_msg_send(pdev,
  6654. DP_PPDU_STATS_CFG_BPR,
  6655. pdev->pdev_id);
  6656. }
  6657. break;
  6658. case 1:
  6659. pdev->tx_sniffer_enable = 1;
  6660. pdev->mcopy_mode = 0;
  6661. if (!pdev->pktlog_ppdu_stats)
  6662. dp_h2t_cfg_stats_msg_send(pdev,
  6663. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6664. break;
  6665. case 2:
  6666. if (pdev->monitor_vdev) {
  6667. status = QDF_STATUS_E_RESOURCES;
  6668. break;
  6669. }
  6670. pdev->mcopy_mode = 1;
  6671. dp_pdev_configure_monitor_rings(pdev);
  6672. pdev->tx_sniffer_enable = 0;
  6673. if (!pdev->pktlog_ppdu_stats)
  6674. dp_h2t_cfg_stats_msg_send(pdev,
  6675. DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
  6676. break;
  6677. default:
  6678. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6679. "Invalid value");
  6680. break;
  6681. }
  6682. return status;
  6683. }
  6684. /*
6685. * dp_enable_enhanced_stats()- API to enable enhanced statistics
  6686. * @pdev_handle: DP_PDEV handle
  6687. *
  6688. * Return: void
  6689. */
  6690. static void
  6691. dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6692. {
  6693. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6694. if (pdev->enhanced_stats_en == 0)
  6695. dp_cal_client_timer_start(pdev->cal_client_ctx);
  6696. pdev->enhanced_stats_en = 1;
  6697. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6698. !pdev->monitor_vdev)
  6699. dp_ppdu_ring_cfg(pdev);
  6700. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6701. dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
  6702. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6703. dp_h2t_cfg_stats_msg_send(pdev,
  6704. DP_PPDU_STATS_CFG_BPR_ENH,
  6705. pdev->pdev_id);
  6706. }
  6707. }
  6708. /*
6709. * dp_disable_enhanced_stats()- API to disable enhanced statistics
  6710. * @pdev_handle: DP_PDEV handle
  6711. *
  6712. * Return: void
  6713. */
  6714. static void
  6715. dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
  6716. {
  6717. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6718. if (pdev->enhanced_stats_en == 1)
  6719. dp_cal_client_timer_stop(pdev->cal_client_ctx);
  6720. pdev->enhanced_stats_en = 0;
  6721. if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
  6722. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  6723. } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
  6724. dp_h2t_cfg_stats_msg_send(pdev,
  6725. DP_PPDU_STATS_CFG_BPR,
  6726. pdev->pdev_id);
  6727. }
  6728. if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
  6729. !pdev->monitor_vdev)
  6730. dp_ppdu_ring_reset(pdev);
  6731. }
  6732. /*
  6733. * dp_get_fw_peer_stats()- function to print peer stats
  6734. * @pdev_handle: DP_PDEV handle
  6735. * @mac_addr: mac address of the peer
  6736. * @cap: Type of htt stats requested
  6737. *
6738. * Currently supports MAC-address based requests only; @cap is one of:
  6739. * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
  6740. * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
  6741. * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
  6742. *
  6743. * Return: void
  6744. */
  6745. static void
  6746. dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
  6747. uint32_t cap)
  6748. {
  6749. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6750. int i;
  6751. uint32_t config_param0 = 0;
  6752. uint32_t config_param1 = 0;
  6753. uint32_t config_param2 = 0;
  6754. uint32_t config_param3 = 0;
  6755. HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
  6756. config_param0 |= (1 << (cap + 1));
  6757. for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
  6758. config_param1 |= (1 << i);
  6759. }
  6760. config_param2 |= (mac_addr[0] & 0x000000ff);
  6761. config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
  6762. config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
  6763. config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
  6764. config_param3 |= (mac_addr[4] & 0x000000ff);
  6765. config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
  6766. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  6767. config_param0, config_param1, config_param2,
  6768. config_param3, 0, 0, 0);
  6769. }
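/*
 * Worked example of the packing above (illustrative): for peer MAC
 * 00:11:22:33:44:55, dp_get_fw_peer_stats() builds
 *   config_param2 = 0x33221100	(mac_addr[0..3], byte 0 in bits 7:0)
 *   config_param3 = 0x00005544	(mac_addr[4..5])
 */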
6770. /* This struct definition will be removed from here
6771. * once it gets added to the FW headers */
  6772. struct httstats_cmd_req {
  6773. uint32_t config_param0;
  6774. uint32_t config_param1;
  6775. uint32_t config_param2;
  6776. uint32_t config_param3;
  6777. int cookie;
  6778. u_int8_t stats_id;
  6779. };
  6780. /*
6781. * dp_get_htt_stats: function to process the httstats request
  6782. * @pdev_handle: DP pdev handle
  6783. * @data: pointer to request data
  6784. * @data_len: length for request data
  6785. *
  6786. * return: void
  6787. */
  6788. static void
  6789. dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
  6790. {
  6791. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6792. struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
  6793. QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
  6794. dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
  6795. req->config_param0, req->config_param1,
  6796. req->config_param2, req->config_param3,
  6797. req->cookie, 0, 0);
  6798. }
  6799. /*
  6800. * dp_set_pdev_param: function to set parameters in pdev
  6801. * @pdev_handle: DP pdev handle
  6802. * @param: parameter type to be set
  6803. * @val: value of parameter to be set
  6804. *
  6805. * Return: 0 for success. nonzero for failure.
  6806. */
  6807. static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
  6808. enum cdp_pdev_param_type param,
  6809. uint8_t val)
  6810. {
  6811. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6812. switch (param) {
  6813. case CDP_CONFIG_DEBUG_SNIFFER:
  6814. return dp_config_debug_sniffer(pdev_handle, val);
  6815. case CDP_CONFIG_BPR_ENABLE:
  6816. return dp_set_bpr_enable(pdev_handle, val);
  6817. case CDP_CONFIG_PRIMARY_RADIO:
  6818. pdev->is_primary = val;
  6819. break;
  6820. default:
  6821. return QDF_STATUS_E_INVAL;
  6822. }
  6823. return QDF_STATUS_SUCCESS;
  6824. }
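/*
 * Illustrative sketch (hypothetical helper): enabling M-copy mode through
 * the pdev-param path. Per dp_config_debug_sniffer() above, val 0 disables
 * capture, 1 enables the tx sniffer and 2 enables M-copy mode.
 */
static QDF_STATUS dp_example_enable_mcopy(struct cdp_pdev *pdev_handle)
{
	return dp_set_pdev_param(pdev_handle, CDP_CONFIG_DEBUG_SNIFFER, 2);
}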
  6825. /*
6826. * dp_get_vdev_param: function to get parameters from vdev
6827. * @vdev_handle: DP vdev handle
6828. * @param: parameter type to get value for
6829. * return: parameter value, or -1 for an unknown parameter type
  6830. */
  6831. static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
  6832. enum cdp_vdev_param_type param)
  6833. {
  6834. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6835. uint32_t val;
  6836. switch (param) {
  6837. case CDP_ENABLE_WDS:
  6838. val = vdev->wds_enabled;
  6839. break;
  6840. case CDP_ENABLE_MEC:
  6841. val = vdev->mec_enabled;
  6842. break;
  6843. case CDP_ENABLE_DA_WAR:
  6844. val = vdev->da_war_enabled;
  6845. break;
  6846. default:
  6847. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6848. "param value %d is wrong\n",
  6849. param);
  6850. val = -1;
  6851. break;
  6852. }
  6853. return val;
  6854. }
  6855. /*
  6856. * dp_set_vdev_param: function to set parameters in vdev
6857. * @vdev_handle: DP vdev handle
6858. * @param: parameter type to be set
6859. * @val: value of parameter to be set
  6860. * return: void
  6861. */
  6862. static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
  6863. enum cdp_vdev_param_type param, uint32_t val)
  6864. {
  6865. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6866. switch (param) {
  6867. case CDP_ENABLE_WDS:
  6868. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6869. "wds_enable %d for vdev(%p) id(%d)\n",
  6870. val, vdev, vdev->vdev_id);
  6871. vdev->wds_enabled = val;
  6872. break;
  6873. case CDP_ENABLE_MEC:
  6874. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6875. "mec_enable %d for vdev(%p) id(%d)\n",
  6876. val, vdev, vdev->vdev_id);
  6877. vdev->mec_enabled = val;
  6878. break;
  6879. case CDP_ENABLE_DA_WAR:
  6880. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  6881. "da_war_enable %d for vdev(%p) id(%d)\n",
  6882. val, vdev, vdev->vdev_id);
  6883. vdev->da_war_enabled = val;
  6884. break;
  6885. case CDP_ENABLE_NAWDS:
  6886. vdev->nawds_enabled = val;
  6887. break;
  6888. case CDP_ENABLE_MCAST_EN:
  6889. vdev->mcast_enhancement_en = val;
  6890. break;
  6891. case CDP_ENABLE_PROXYSTA:
  6892. vdev->proxysta_vdev = val;
  6893. break;
  6894. case CDP_UPDATE_TDLS_FLAGS:
  6895. vdev->tdls_link_connected = val;
  6896. break;
  6897. case CDP_CFG_WDS_AGING_TIMER:
  6898. if (val == 0)
  6899. qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
  6900. else if (val != vdev->wds_aging_timer_val)
  6901. qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
  6902. vdev->wds_aging_timer_val = val;
  6903. break;
  6904. case CDP_ENABLE_AP_BRIDGE:
  6905. if (wlan_op_mode_sta != vdev->opmode)
  6906. vdev->ap_bridge_enabled = val;
  6907. else
  6908. vdev->ap_bridge_enabled = false;
  6909. break;
  6910. case CDP_ENABLE_CIPHER:
  6911. vdev->sec_type = val;
  6912. break;
  6913. case CDP_ENABLE_QWRAP_ISOLATION:
  6914. vdev->isolation_vdev = val;
  6915. break;
  6916. default:
  6917. break;
  6918. }
  6919. dp_tx_vdev_update_search_flags(vdev);
  6920. }
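/*
 * Illustrative sketch (hypothetical helper): toggling WDS on a vdev.
 * dp_set_vdev_param() refreshes the TX search flags on every call, so no
 * extra step is needed after flipping the flag.
 */
static void dp_example_toggle_wds(struct cdp_vdev *vdev_handle, bool on)
{
	dp_set_vdev_param(vdev_handle, CDP_ENABLE_WDS, on ? 1 : 0);
}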
  6921. /**
  6922. * dp_peer_set_nawds: set nawds bit in peer
  6923. * @peer_handle: pointer to peer
  6924. * @value: enable/disable nawds
  6925. *
  6926. * return: void
  6927. */
  6928. static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
  6929. {
  6930. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6931. peer->nawds_enabled = value;
  6932. }
  6933. /*
  6934. * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
  6935. * @vdev_handle: DP_VDEV handle
6936. * @map_id: ID of map that needs to be updated
  6937. *
  6938. * Return: void
  6939. */
  6940. static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
  6941. uint8_t map_id)
  6942. {
  6943. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6944. vdev->dscp_tid_map_id = map_id;
  6945. return;
  6946. }
  6947. /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
6948. * @pdev_handle: DP pdev handle
  6949. *
  6950. * return : cdp_pdev_stats pointer
  6951. */
  6952. static struct cdp_pdev_stats*
  6953. dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
  6954. {
  6955. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  6956. dp_aggregate_pdev_stats(pdev);
  6957. return &pdev->stats;
  6958. }
6959. /* dp_txrx_get_peer_stats - Returns cdp_peer_stats
  6960. * @peer_handle: DP_PEER handle
  6961. *
  6962. * return : cdp_peer_stats pointer
  6963. */
  6964. static struct cdp_peer_stats*
  6965. dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
  6966. {
  6967. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6968. qdf_assert(peer);
  6969. return &peer->stats;
  6970. }
  6971. /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
  6972. * @peer_handle: DP_PEER handle
  6973. *
  6974. * return : void
  6975. */
  6976. static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
  6977. {
  6978. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  6979. qdf_assert(peer);
  6980. qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
  6981. }
  6982. /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
  6983. * @vdev_handle: DP_VDEV handle
6984. * @buf: buffer for vdev stats
6985. * @is_aggregate: true to aggregate stats across peers, false to copy raw vdev stats
  6986. * return : int
  6987. */
  6988. static int dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
  6989. bool is_aggregate)
  6990. {
  6991. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  6992. struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
  6993. if (is_aggregate)
  6994. dp_aggregate_vdev_stats(vdev, buf);
  6995. else
  6996. qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
  6997. return 0;
  6998. }
  6999. /*
7000. * dp_get_total_per(): get the total PER (packet error rate)
  7001. * @pdev_handle: DP_PDEV handle
  7002. *
  7003. * Return: % error rate using retries per packet and success packets
  7004. */
  7005. static int dp_get_total_per(struct cdp_pdev *pdev_handle)
  7006. {
  7007. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7008. dp_aggregate_pdev_stats(pdev);
  7009. if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
  7010. return 0;
  7011. return ((pdev->stats.tx.retries * 100) /
  7012. ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
  7013. }
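/*
 * Worked example for dp_get_total_per() (illustrative): with
 * tx_success.num = 900 and retries = 100, the function returns
 * (100 * 100) / (900 + 100) = 10, i.e. a 10% retry rate.
 */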
  7014. /*
  7015. * dp_txrx_stats_publish(): publish pdev stats into a buffer
  7016. * @pdev_handle: DP_PDEV handle
  7017. * @buf: to hold pdev_stats
  7018. *
  7019. * Return: int
  7020. */
  7021. static int
  7022. dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
  7023. {
  7024. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7025. struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
  7026. struct cdp_txrx_stats_req req = {0,};
  7027. dp_aggregate_pdev_stats(pdev);
  7028. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
  7029. req.cookie_val = 1;
  7030. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7031. req.param1, req.param2, req.param3, 0,
  7032. req.cookie_val, 0);
  7033. msleep(DP_MAX_SLEEP_TIME);
  7034. req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
  7035. req.cookie_val = 1;
  7036. dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
  7037. req.param1, req.param2, req.param3, 0,
  7038. req.cookie_val, 0);
  7039. msleep(DP_MAX_SLEEP_TIME);
  7040. qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
  7041. return TXRX_STATS_LEVEL;
  7042. }
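/*
 * Illustrative sketch (hypothetical caller): dp_txrx_stats_publish()
 * blocks across the two FW queries above and then fills a caller-owned
 * buffer; a real caller would likely heap-allocate it given the size of
 * struct cdp_pdev_stats.
 */
static void dp_example_publish_pdev_stats(struct cdp_pdev *pdev_handle,
					  struct cdp_pdev_stats *buf)
{
	dp_txrx_stats_publish(pdev_handle, buf);
}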
  7043. /**
  7044. * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
7045. * @pdev_handle: DP_PDEV handle
  7046. * @map_id: ID of map that needs to be updated
  7047. * @tos: index value in map
  7048. * @tid: tid value passed by the user
  7049. *
  7050. * Return: void
  7051. */
  7052. static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
  7053. uint8_t map_id, uint8_t tos, uint8_t tid)
  7054. {
  7055. uint8_t dscp;
  7056. struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
  7057. struct dp_soc *soc = pdev->soc;
  7058. if (!soc)
  7059. return;
  7060. dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  7061. pdev->dscp_tid_map[map_id][dscp] = tid;
  7062. if (map_id < soc->num_hw_dscp_tid_map)
  7063. hal_tx_update_dscp_tid(soc->hal_soc, tid,
  7064. map_id, dscp);
  7065. return;
  7066. }
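/*
 * Worked example (illustrative, assuming DP_IP_DSCP_SHIFT is 2 and
 * DP_IP_DSCP_MASK is 0x3f): mapping the EF code point to TID 6 means
 * passing tos = 0xb8, so dscp = (0xb8 >> 2) & 0x3f = 0x2e (46) and
 * dscp_tid_map[map_id][46] becomes 6.
 */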
  7067. /**
  7068. * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
  7069. * @pdev_handle: pdev handle
  7070. * @val: hmmc-dscp flag value
  7071. *
  7072. * Return: void
  7073. */
  7074. static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
  7075. bool val)
  7076. {
  7077. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7078. pdev->hmmc_tid_override_en = val;
  7079. }
  7080. /**
  7081. * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
  7082. * @pdev_handle: pdev handle
  7083. * @tid: tid value
  7084. *
  7085. * Return: void
  7086. */
  7087. static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
  7088. uint8_t tid)
  7089. {
  7090. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  7091. pdev->hmmc_tid = tid;
  7092. }
  7093. /**
7094. * dp_fw_stats_process(): Process TxRx FW stats request
  7095. * @vdev_handle: DP VDEV handle
  7096. * @req: stats request
  7097. *
  7098. * return: int
  7099. */
  7100. static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
  7101. struct cdp_txrx_stats_req *req)
  7102. {
  7103. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7104. struct dp_pdev *pdev = NULL;
  7105. uint32_t stats = req->stats;
  7106. uint8_t mac_id = req->mac_id;
  7107. if (!vdev) {
  7108. DP_TRACE(NONE, "VDEV not found");
  7109. return 1;
  7110. }
  7111. pdev = vdev->pdev;
  7112. /*
7113. * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
7114. * config_param0 to config_param3 to be set per the rule below:
  7115. *
  7116. * PARAM:
  7117. * - config_param0 : start_offset (stats type)
  7118. * - config_param1 : stats bmask from start offset
  7119. * - config_param2 : stats bmask from start offset + 32
  7120. * - config_param3 : stats bmask from start offset + 64
  7121. */
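/*
 * Worked example of the RESET rule (illustrative): to reset stats
 * type 2 and type 34, set config_param0 = 2 (start offset),
 * config_param1 = 0x1 (bit 0 => type 2) and config_param2 = 0x1
 * (bit 0 of the +32 window => type 34).
 */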
  7122. if (req->stats == CDP_TXRX_STATS_0) {
  7123. req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
  7124. req->param1 = 0xFFFFFFFF;
  7125. req->param2 = 0xFFFFFFFF;
  7126. req->param3 = 0xFFFFFFFF;
  7127. } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
  7128. req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
  7129. }
  7130. return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
  7131. req->param1, req->param2, req->param3,
  7132. 0, 0, mac_id);
  7133. }
  7134. /**
  7135. * dp_txrx_stats_request - function to map to firmware and host stats
  7136. * @vdev: virtual handle
  7137. * @req: stats request
  7138. *
  7139. * Return: QDF_STATUS
  7140. */
  7141. static
  7142. QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
  7143. struct cdp_txrx_stats_req *req)
  7144. {
  7145. int host_stats;
  7146. int fw_stats;
  7147. enum cdp_stats stats;
  7148. int num_stats;
  7149. if (!vdev || !req) {
  7150. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7151. "Invalid vdev/req instance");
  7152. return QDF_STATUS_E_INVAL;
  7153. }
  7154. stats = req->stats;
  7155. if (stats >= CDP_TXRX_MAX_STATS)
  7156. return QDF_STATUS_E_INVAL;
  7157. /*
7158. * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
7159. * must be updated whenever new FW HTT stats are added
  7160. */
  7161. if (stats > CDP_TXRX_STATS_HTT_MAX)
  7162. stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
  7163. num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
  7164. if (stats >= num_stats) {
  7165. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7166. "%s: Invalid stats option: %d", __func__, stats);
  7167. return QDF_STATUS_E_INVAL;
  7168. }
  7169. req->stats = stats;
  7170. fw_stats = dp_stats_mapping_table[stats][STATS_FW];
  7171. host_stats = dp_stats_mapping_table[stats][STATS_HOST];
  7172. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7173. "stats: %u fw_stats_type: %d host_stats: %d",
  7174. stats, fw_stats, host_stats);
  7175. if (fw_stats != TXRX_FW_STATS_INVALID) {
  7176. /* update request with FW stats type */
  7177. req->stats = fw_stats;
  7178. return dp_fw_stats_process(vdev, req);
  7179. }
  7180. if ((host_stats != TXRX_HOST_STATS_INVALID) &&
  7181. (host_stats <= TXRX_HOST_STATS_MAX))
  7182. return dp_print_host_stats(vdev, req);
  7183. else
  7184. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7185. "Wrong Input for TxRx Stats");
  7186. return QDF_STATUS_SUCCESS;
  7187. }
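/*
 * Illustrative sketch (hypothetical caller): the dispatcher above is the
 * external entry point; whether a request goes to FW or is printed on the
 * host is decided by dp_stats_mapping_table. CDP_TXRX_STATS_0 is assumed
 * to resolve to the FW pdev TX stats, per the CDP_TXRX_STATS_0 branch in
 * dp_fw_stats_process().
 */
static QDF_STATUS dp_example_request_fw_tx_stats(struct cdp_vdev *vdev)
{
	struct cdp_txrx_stats_req req = {0};

	req.stats = CDP_TXRX_STATS_0;
	req.mac_id = 0;
	return dp_txrx_stats_request(vdev, &req);
}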
  7188. /*
  7189. * dp_print_napi_stats(): NAPI stats
  7190. * @soc - soc handle
  7191. */
  7192. static void dp_print_napi_stats(struct dp_soc *soc)
  7193. {
  7194. hif_print_napi_stats(soc->hif_handle);
  7195. }
  7196. /*
  7197. * dp_print_per_ring_stats(): Packet count per ring
  7198. * @soc - soc handle
  7199. */
  7200. static void dp_print_per_ring_stats(struct dp_soc *soc)
  7201. {
  7202. uint8_t ring;
  7203. uint16_t core;
  7204. uint64_t total_packets;
  7205. DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
  7206. for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
  7207. total_packets = 0;
  7208. DP_TRACE_STATS(INFO_HIGH,
  7209. "Packets on ring %u:", ring);
  7210. for (core = 0; core < NR_CPUS; core++) {
  7211. DP_TRACE_STATS(INFO_HIGH,
  7212. "Packets arriving on core %u: %llu",
  7213. core,
  7214. soc->stats.rx.ring_packets[core][ring]);
  7215. total_packets += soc->stats.rx.ring_packets[core][ring];
  7216. }
  7217. DP_TRACE_STATS(INFO_HIGH,
  7218. "Total packets on ring %u: %llu",
  7219. ring, total_packets);
  7220. }
  7221. }
  7222. /*
7223. * dp_txrx_path_stats() - Function to dump the Tx/Rx path statistics
  7224. * @soc - soc handle
  7225. *
  7226. * return: none
  7227. */
  7228. static void dp_txrx_path_stats(struct dp_soc *soc)
  7229. {
  7230. uint8_t error_code;
  7231. uint8_t loop_pdev;
  7232. struct dp_pdev *pdev;
  7233. uint8_t i;
  7234. if (!soc) {
  7235. DP_TRACE(ERROR, "%s: Invalid access",
  7236. __func__);
  7237. return;
  7238. }
  7239. for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
  7240. pdev = soc->pdev_list[loop_pdev];
  7241. dp_aggregate_pdev_stats(pdev);
  7242. DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
  7243. DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
  7244. pdev->stats.tx_i.rcvd.num,
  7245. pdev->stats.tx_i.rcvd.bytes);
  7246. DP_TRACE_STATS(INFO_HIGH,
  7247. "processed from host: %u msdus (%llu bytes)",
  7248. pdev->stats.tx_i.processed.num,
  7249. pdev->stats.tx_i.processed.bytes);
  7250. DP_TRACE_STATS(INFO_HIGH,
  7251. "successfully transmitted: %u msdus (%llu bytes)",
  7252. pdev->stats.tx.tx_success.num,
  7253. pdev->stats.tx.tx_success.bytes);
  7254. DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
  7255. DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
  7256. pdev->stats.tx_i.dropped.dropped_pkt.num);
  7257. DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
  7258. pdev->stats.tx_i.dropped.desc_na.num);
  7259. DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
  7260. pdev->stats.tx_i.dropped.ring_full);
  7261. DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
  7262. pdev->stats.tx_i.dropped.enqueue_fail);
  7263. DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
  7264. pdev->stats.tx_i.dropped.dma_error);
  7265. DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
  7266. DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
  7267. pdev->stats.tx.tx_failed);
  7268. DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
  7269. pdev->stats.tx.dropped.age_out);
  7270. DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
  7271. pdev->stats.tx.dropped.fw_rem.num);
  7272. DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
  7273. pdev->stats.tx.dropped.fw_rem.bytes);
  7274. DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
  7275. pdev->stats.tx.dropped.fw_rem_tx);
  7276. DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
  7277. pdev->stats.tx.dropped.fw_rem_notx);
  7278. DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
  7279. pdev->soc->stats.tx.tx_invalid_peer.num);
  7280. DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
  7281. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7282. pdev->stats.tx_comp_histogram.pkts_1);
  7283. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7284. pdev->stats.tx_comp_histogram.pkts_2_20);
  7285. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7286. pdev->stats.tx_comp_histogram.pkts_21_40);
  7287. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7288. pdev->stats.tx_comp_histogram.pkts_41_60);
  7289. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7290. pdev->stats.tx_comp_histogram.pkts_61_80);
  7291. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7292. pdev->stats.tx_comp_histogram.pkts_81_100);
  7293. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7294. pdev->stats.tx_comp_histogram.pkts_101_200);
  7295. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7296. pdev->stats.tx_comp_histogram.pkts_201_plus);
  7297. DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
  7298. DP_TRACE_STATS(INFO_HIGH,
  7299. "delivered %u msdus ( %llu bytes),",
  7300. pdev->stats.rx.to_stack.num,
  7301. pdev->stats.rx.to_stack.bytes);
  7302. for (i = 0; i < CDP_MAX_RX_RINGS; i++)
  7303. DP_TRACE_STATS(INFO_HIGH,
  7304. "received on reo[%d] %u msdus( %llu bytes),",
  7305. i, pdev->stats.rx.rcvd_reo[i].num,
  7306. pdev->stats.rx.rcvd_reo[i].bytes);
  7307. DP_TRACE_STATS(INFO_HIGH,
  7308. "intra-bss packets %u msdus ( %llu bytes),",
  7309. pdev->stats.rx.intra_bss.pkts.num,
  7310. pdev->stats.rx.intra_bss.pkts.bytes);
  7311. DP_TRACE_STATS(INFO_HIGH,
  7312. "intra-bss fails %u msdus ( %llu bytes),",
  7313. pdev->stats.rx.intra_bss.fail.num,
  7314. pdev->stats.rx.intra_bss.fail.bytes);
  7315. DP_TRACE_STATS(INFO_HIGH,
  7316. "raw packets %u msdus ( %llu bytes),",
  7317. pdev->stats.rx.raw.num,
  7318. pdev->stats.rx.raw.bytes);
  7319. DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
  7320. pdev->stats.rx.err.mic_err);
  7321. DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
  7322. pdev->soc->stats.rx.err.rx_invalid_peer.num);
  7323. DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
  7324. DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
  7325. pdev->soc->stats.rx.err.invalid_rbm);
  7326. DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
  7327. pdev->soc->stats.rx.err.hal_ring_access_fail);
  7328. for (error_code = 0; error_code < HAL_REO_ERR_MAX;
  7329. error_code++) {
  7330. if (!pdev->soc->stats.rx.err.reo_error[error_code])
  7331. continue;
  7332. DP_TRACE_STATS(INFO_HIGH,
  7333. "Reo error number (%u): %u msdus",
  7334. error_code,
  7335. pdev->soc->stats.rx.err
  7336. .reo_error[error_code]);
  7337. }
  7338. for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
  7339. error_code++) {
  7340. if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
  7341. continue;
  7342. DP_TRACE_STATS(INFO_HIGH,
  7343. "Rxdma error number (%u): %u msdus",
  7344. error_code,
  7345. pdev->soc->stats.rx.err
  7346. .rxdma_error[error_code]);
  7347. }
  7348. DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
  7349. DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
  7350. pdev->stats.rx_ind_histogram.pkts_1);
  7351. DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
  7352. pdev->stats.rx_ind_histogram.pkts_2_20);
  7353. DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
  7354. pdev->stats.rx_ind_histogram.pkts_21_40);
  7355. DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
  7356. pdev->stats.rx_ind_histogram.pkts_41_60);
  7357. DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
  7358. pdev->stats.rx_ind_histogram.pkts_61_80);
  7359. DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
  7360. pdev->stats.rx_ind_histogram.pkts_81_100);
  7361. DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
  7362. pdev->stats.rx_ind_histogram.pkts_101_200);
  7363. DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
  7364. pdev->stats.rx_ind_histogram.pkts_201_plus);
  7365. DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
  7366. __func__,
  7367. pdev->soc->wlan_cfg_ctx
  7368. ->tso_enabled,
  7369. pdev->soc->wlan_cfg_ctx
  7370. ->lro_enabled,
  7371. pdev->soc->wlan_cfg_ctx
  7372. ->rx_hash,
  7373. pdev->soc->wlan_cfg_ctx
  7374. ->napi_enabled);
  7375. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7376. DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
  7377. __func__,
  7378. pdev->soc->wlan_cfg_ctx
  7379. ->tx_flow_stop_queue_threshold,
  7380. pdev->soc->wlan_cfg_ctx
  7381. ->tx_flow_start_queue_offset);
  7382. #endif
  7383. }
  7384. }
  7385. /*
  7386. * dp_txrx_dump_stats() - Dump statistics
7387. * @psoc - soc handle, @value - statistics option, @level - verbosity level
  7388. */
  7389. static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
  7390. enum qdf_stats_verbosity_level level)
  7391. {
  7392. struct dp_soc *soc =
  7393. (struct dp_soc *)psoc;
  7394. QDF_STATUS status = QDF_STATUS_SUCCESS;
  7395. if (!soc) {
  7396. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7397. "%s: soc is NULL", __func__);
  7398. return QDF_STATUS_E_INVAL;
  7399. }
  7400. switch (value) {
  7401. case CDP_TXRX_PATH_STATS:
  7402. dp_txrx_path_stats(soc);
  7403. break;
  7404. case CDP_RX_RING_STATS:
  7405. dp_print_per_ring_stats(soc);
  7406. break;
  7407. case CDP_TXRX_TSO_STATS:
  7408. /* TODO: NOT IMPLEMENTED */
  7409. break;
  7410. case CDP_DUMP_TX_FLOW_POOL_INFO:
  7411. cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
  7412. break;
  7413. case CDP_DP_NAPI_STATS:
  7414. dp_print_napi_stats(soc);
  7415. break;
  7416. case CDP_TXRX_DESC_STATS:
  7417. /* TODO: NOT IMPLEMENTED */
  7418. break;
  7419. default:
  7420. status = QDF_STATUS_E_INVAL;
  7421. break;
  7422. }
  7423. return status;
  7424. }
  7425. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  7426. /**
  7427. * dp_update_flow_control_parameters() - API to store datapath
  7428. * config parameters
  7429. * @soc: soc handle
7430. * @params: ini parameter block
  7431. *
  7432. * Return: void
  7433. */
  7434. static inline
  7435. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7436. struct cdp_config_params *params)
  7437. {
  7438. soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
  7439. params->tx_flow_stop_queue_threshold;
  7440. soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
  7441. params->tx_flow_start_queue_offset;
  7442. }
  7443. #else
  7444. static inline
  7445. void dp_update_flow_control_parameters(struct dp_soc *soc,
  7446. struct cdp_config_params *params)
  7447. {
  7448. }
  7449. #endif
  7450. /**
  7451. * dp_update_config_parameters() - API to store datapath
  7452. * config parameters
7453. * @psoc: soc handle
7454. * @params: ini parameter block
  7455. *
  7456. * Return: status
  7457. */
  7458. static
  7459. QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
  7460. struct cdp_config_params *params)
  7461. {
  7462. struct dp_soc *soc = (struct dp_soc *)psoc;
  7463. if (!(soc)) {
  7464. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  7465. "%s: Invalid handle", __func__);
  7466. return QDF_STATUS_E_INVAL;
  7467. }
  7468. soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
  7469. soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
  7470. soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
  7471. soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
  7472. params->tcp_udp_checksumoffload;
  7473. soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
  7474. soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
  7475. soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
  7476. dp_update_flow_control_parameters(soc, params);
  7477. return QDF_STATUS_SUCCESS;
  7478. }
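/*
 * Illustrative sketch (hypothetical caller): populating a
 * cdp_config_params block and pushing it into the soc config context.
 * Field names follow the assignments in dp_update_config_parameters().
 */
static void dp_example_apply_ini_cfg(struct cdp_soc *psoc)
{
	struct cdp_config_params params = {0};

	params.tso_enable = 1;
	params.napi_enable = 1;
	dp_update_config_parameters(psoc, &params);
}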
  7479. /**
7480. * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy
7481. * on a vdev's peer(s)
7482. * @vdev_handle - datapath vdev handle
7483. * @val: policy flags (WDS_POLICY_RX_UCAST_4ADDR / WDS_POLICY_RX_MCAST_4ADDR)
7484. *
7485. * Return: void
  7486. */
  7487. #ifdef WDS_VENDOR_EXTENSION
  7488. void
  7489. dp_txrx_set_wds_rx_policy(
  7490. struct cdp_vdev *vdev_handle,
  7491. u_int32_t val)
  7492. {
  7493. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7494. struct dp_peer *peer;
  7495. if (vdev->opmode == wlan_op_mode_ap) {
  7496. /* for ap, set it on bss_peer */
  7497. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  7498. if (peer->bss_peer) {
  7499. peer->wds_ecm.wds_rx_filter = 1;
  7500. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7501. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7502. break;
  7503. }
  7504. }
  7505. } else if (vdev->opmode == wlan_op_mode_sta) {
  7506. peer = TAILQ_FIRST(&vdev->peer_list);
  7507. peer->wds_ecm.wds_rx_filter = 1;
  7508. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  7509. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  7510. }
  7511. }
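/*
 * Worked example (illustrative): enabling 4-address receive for both
 * unicast and multicast on a vdev:
 *   dp_txrx_set_wds_rx_policy(vdev_handle,
 *			       WDS_POLICY_RX_UCAST_4ADDR |
 *			       WDS_POLICY_RX_MCAST_4ADDR);
 */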
  7512. /**
  7513. * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
  7514. *
  7515. * @peer_handle - datapath peer handle
  7516. * @wds_tx_ucast: policy for unicast transmission
  7517. * @wds_tx_mcast: policy for multicast transmission
  7518. *
  7519. * Return: void
  7520. */
  7521. void
  7522. dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
  7523. int wds_tx_ucast, int wds_tx_mcast)
  7524. {
  7525. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  7526. if (wds_tx_ucast || wds_tx_mcast) {
  7527. peer->wds_enabled = 1;
  7528. peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
  7529. peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
  7530. } else {
  7531. peer->wds_enabled = 0;
  7532. peer->wds_ecm.wds_tx_ucast_4addr = 0;
  7533. peer->wds_ecm.wds_tx_mcast_4addr = 0;
  7534. }
  7535. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  7536. FL("Policy Update set to :\
  7537. peer->wds_enabled %d\
  7538. peer->wds_ecm.wds_tx_ucast_4addr %d\
  7539. peer->wds_ecm.wds_tx_mcast_4addr %d"),
  7540. peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
  7541. peer->wds_ecm.wds_tx_mcast_4addr);
  7542. return;
  7543. }
  7544. #endif
  7545. static struct cdp_wds_ops dp_ops_wds = {
  7546. .vdev_set_wds = dp_vdev_set_wds,
  7547. #ifdef WDS_VENDOR_EXTENSION
  7548. .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
  7549. .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
  7550. #endif
  7551. };
  7552. /*
  7553. * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
  7554. * @vdev_handle - datapath vdev handle
  7555. * @callback - callback function
  7556. * @ctxt: callback context
  7557. *
  7558. */
  7559. static void
  7560. dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
  7561. ol_txrx_data_tx_cb callback, void *ctxt)
  7562. {
  7563. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7564. vdev->tx_non_std_data_callback.func = callback;
  7565. vdev->tx_non_std_data_callback.ctxt = ctxt;
  7566. }
  7567. /**
  7568. * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
  7569. * @pdev_hdl: datapath pdev handle
  7570. *
  7571. * Return: opaque pointer to dp txrx handle
  7572. */
  7573. static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
  7574. {
  7575. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7576. return pdev->dp_txrx_handle;
  7577. }
  7578. /**
  7579. * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
  7580. * @pdev_hdl: datapath pdev handle
  7581. * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
  7582. *
  7583. * Return: void
  7584. */
  7585. static void
  7586. dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
  7587. {
  7588. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  7589. pdev->dp_txrx_handle = dp_txrx_hdl;
  7590. }
  7591. /**
  7592. * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
  7593. * @soc_handle: datapath soc handle
  7594. *
  7595. * Return: opaque pointer to external dp (non-core DP)
  7596. */
  7597. static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
  7598. {
  7599. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7600. return soc->external_txrx_handle;
  7601. }
  7602. /**
  7603. * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
  7604. * @soc_handle: datapath soc handle
  7605. * @txrx_handle: opaque pointer to external dp (non-core DP)
  7606. *
  7607. * Return: void
  7608. */
  7609. static void
  7610. dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
  7611. {
  7612. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7613. soc->external_txrx_handle = txrx_handle;
  7614. }
  7615. /**
  7616. * dp_get_cfg_capabilities() - get dp capabilities
  7617. * @soc_handle: datapath soc handle
  7618. * @dp_caps: enum for dp capabilities
  7619. *
  7620. * Return: bool to determine if dp caps is enabled
  7621. */
  7622. static bool
  7623. dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
  7624. enum cdp_capabilities dp_caps)
  7625. {
  7626. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  7627. return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
  7628. }
  7629. #ifdef FEATURE_AST
  7630. static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  7631. {
  7632. struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
  7633. struct dp_peer *peer = (struct dp_peer *) peer_hdl;
  7634. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7635. /*
7636. * For the BSS peer, a new peer is not created on alloc_node if a
7637. * peer with the same address already exists; instead the refcnt of
7638. * the existing peer is increased. Correspondingly, in the delete
7639. * path only the refcnt is decreased, and the peer is deleted only
7640. * when all references are released. So delete_in_progress should
7641. * not be set for the bss_peer unless only 2 references remain (the
7642. * peer map reference and the peer hash table reference).
  7643. */
  7644. if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
  7645. return;
  7646. }
  7647. peer->delete_in_progress = true;
  7648. dp_peer_delete_ast_entries(soc, peer);
  7649. }
  7650. #endif
  7651. #ifdef ATH_SUPPORT_NAC_RSSI
  7652. /**
7653. * dp_vdev_get_neighbour_rssi(): Fetch the stored RSSI for a configured NAC
7654. * @vdev_hdl: DP vdev handle, @mac_addr: neighbour peer mac address
7655. * @rssi: out param for the stored rssi value
  7656. *
  7657. * Return: 0 for success. nonzero for failure.
  7658. */
  7659. QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
  7660. char *mac_addr,
  7661. uint8_t *rssi)
  7662. {
  7663. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  7664. struct dp_pdev *pdev = vdev->pdev;
  7665. struct dp_neighbour_peer *peer = NULL;
  7666. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  7667. *rssi = 0;
  7668. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  7669. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  7670. neighbour_peer_list_elem) {
  7671. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  7672. mac_addr, DP_MAC_ADDR_LEN) == 0) {
  7673. *rssi = peer->rssi;
  7674. status = QDF_STATUS_SUCCESS;
  7675. break;
  7676. }
  7677. }
  7678. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  7679. return status;
  7680. }
  7681. static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
  7682. enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
  7683. uint8_t chan_num)
  7684. {
  7685. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  7686. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  7687. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  7688. pdev->nac_rssi_filtering = 1;
  7689. /* Store address of NAC (neighbour peer) which will be checked
  7690. * against TA of received packets.
  7691. */
  7692. if (cmd == CDP_NAC_PARAM_ADD) {
  7693. dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
  7694. client_macaddr);
  7695. } else if (cmd == CDP_NAC_PARAM_DEL) {
  7696. dp_update_filter_neighbour_peers(vdev_handle,
  7697. DP_NAC_PARAM_DEL,
  7698. client_macaddr);
  7699. }
  7700. if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
  7701. soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
  7702. ((void *)vdev->pdev->ctrl_pdev,
  7703. vdev->vdev_id, cmd, bssid);
  7704. return QDF_STATUS_SUCCESS;
  7705. }
  7706. #endif
  7707. /**
  7708. * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
  7709. * for pktlog
7710. * @txrx_pdev_handle: cdp_pdev handle, @mac_addr: peer mac address
  7711. * @enb_dsb: Enable or disable peer based filtering
  7712. *
  7713. * Return: QDF_STATUS
  7714. */
  7715. static int
  7716. dp_enable_peer_based_pktlog(
  7717. struct cdp_pdev *txrx_pdev_handle,
  7718. char *mac_addr, uint8_t enb_dsb)
  7719. {
  7720. struct dp_peer *peer;
  7721. uint8_t local_id;
  7722. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
  7723. peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
  7724. mac_addr, &local_id);
  7725. if (!peer) {
  7726. dp_err("Invalid Peer");
  7727. return QDF_STATUS_E_FAILURE;
  7728. }
  7729. peer->peer_based_pktlog_filter = enb_dsb;
  7730. pdev->dp_peer_based_pktlog = enb_dsb;
  7731. return QDF_STATUS_SUCCESS;
  7732. }
  7733. static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
  7734. uint32_t max_peers,
  7735. bool peer_map_unmap_v2)
  7736. {
  7737. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  7738. soc->max_peers = max_peers;
7739. qdf_print("%s max_peers %u\n", __func__, max_peers);
  7740. if (dp_peer_find_attach(soc))
  7741. return QDF_STATUS_E_FAILURE;
  7742. soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
  7743. return QDF_STATUS_SUCCESS;
  7744. }
  7745. /**
  7746. * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
  7747. * @dp_pdev: dp pdev handle
  7748. * @ctrl_pdev: UMAC ctrl pdev handle
  7749. *
  7750. * Return: void
  7751. */
  7752. static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
  7753. struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
  7754. {
  7755. struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
  7756. pdev->ctrl_pdev = ctrl_pdev;
  7757. }
  7758. /*
  7759. * dp_get_cfg() - get dp cfg
  7760. * @soc: cdp soc handle
  7761. * @cfg: cfg enum
  7762. *
  7763. * Return: cfg value
  7764. */
  7765. static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
  7766. {
  7767. struct dp_soc *dpsoc = (struct dp_soc *)soc;
  7768. uint32_t value = 0;
  7769. switch (cfg) {
  7770. case cfg_dp_enable_data_stall:
  7771. value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
  7772. break;
  7773. case cfg_dp_enable_ip_tcp_udp_checksum_offload:
  7774. value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
  7775. break;
  7776. case cfg_dp_tso_enable:
  7777. value = dpsoc->wlan_cfg_ctx->tso_enabled;
  7778. break;
  7779. case cfg_dp_lro_enable:
  7780. value = dpsoc->wlan_cfg_ctx->lro_enabled;
  7781. break;
  7782. case cfg_dp_gro_enable:
  7783. value = dpsoc->wlan_cfg_ctx->gro_enabled;
  7784. break;
  7785. case cfg_dp_tx_flow_start_queue_offset:
  7786. value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
  7787. break;
  7788. case cfg_dp_tx_flow_stop_queue_threshold:
  7789. value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
  7790. break;
  7791. case cfg_dp_disable_intra_bss_fwd:
  7792. value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
  7793. break;
  7794. default:
  7795. value = 0;
  7796. }
  7797. return value;
  7798. }
  7799. static struct cdp_cmn_ops dp_ops_cmn = {
  7800. .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
  7801. .txrx_vdev_attach = dp_vdev_attach_wifi3,
  7802. .txrx_vdev_detach = dp_vdev_detach_wifi3,
  7803. .txrx_pdev_attach = dp_pdev_attach_wifi3,
  7804. .txrx_pdev_detach = dp_pdev_detach_wifi3,
  7805. .txrx_pdev_deinit = dp_pdev_deinit_wifi3,
  7806. .txrx_peer_create = dp_peer_create_wifi3,
  7807. .txrx_peer_setup = dp_peer_setup_wifi3,
  7808. #ifdef FEATURE_AST
  7809. .txrx_peer_teardown = dp_peer_teardown_wifi3,
  7810. #else
  7811. .txrx_peer_teardown = NULL,
  7812. #endif
  7813. .txrx_peer_add_ast = dp_peer_add_ast_wifi3,
  7814. .txrx_peer_del_ast = dp_peer_del_ast_wifi3,
  7815. .txrx_peer_update_ast = dp_peer_update_ast_wifi3,
  7816. .txrx_peer_ast_hash_find_soc = dp_peer_ast_hash_find_soc_wifi3,
  7817. .txrx_peer_ast_hash_find_by_pdevid =
  7818. dp_peer_ast_hash_find_by_pdevid_wifi3,
  7819. .txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
  7820. .txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
  7821. .txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
  7822. .txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
  7823. .txrx_peer_ast_get_peer = dp_peer_ast_get_peer_wifi3,
  7824. .txrx_peer_ast_get_nexthop_peer_id =
  7825. dp_peer_ast_get_nexhop_peer_id_wifi3,
  7826. #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
  7827. .txrx_peer_ast_set_cp_ctx = dp_peer_ast_set_cp_ctx_wifi3,
  7828. .txrx_peer_ast_get_cp_ctx = dp_peer_ast_get_cp_ctx_wifi3,
  7829. .txrx_peer_ast_get_wmi_sent = dp_peer_ast_get_wmi_sent_wifi3,
  7830. .txrx_peer_ast_free_entry = dp_peer_ast_free_entry_wifi3,
  7831. #endif
  7832. .txrx_peer_delete = dp_peer_delete_wifi3,
  7833. .txrx_vdev_register = dp_vdev_register_wifi3,
  7834. .txrx_soc_detach = dp_soc_detach_wifi3,
  7835. .txrx_soc_deinit = dp_soc_deinit_wifi3,
  7836. .txrx_soc_init = dp_soc_init_wifi3,
  7837. .txrx_tso_soc_attach = dp_tso_soc_attach,
  7838. .txrx_tso_soc_detach = dp_tso_soc_detach,
  7839. .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
  7840. .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
  7841. .txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
  7842. .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
  7843. .txrx_ath_getstats = dp_get_device_stats,
  7844. .addba_requestprocess = dp_addba_requestprocess_wifi3,
  7845. .addba_responsesetup = dp_addba_responsesetup_wifi3,
  7846. .addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
  7847. .delba_process = dp_delba_process_wifi3,
  7848. .set_addba_response = dp_set_addba_response,
  7849. .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
  7850. .flush_cache_rx_queue = NULL,
  7851. /* TODO: get API's for dscp-tid need to be added*/
  7852. .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
  7853. .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
  7854. .hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
  7855. .set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
  7856. .txrx_get_total_per = dp_get_total_per,
  7857. .txrx_stats_request = dp_txrx_stats_request,
  7858. .txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
  7859. .txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
  7860. .txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
  7861. .txrx_set_nac = dp_set_nac,
  7862. .txrx_get_tx_pending = dp_get_tx_pending,
  7863. .txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
  7864. .txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
  7865. .display_stats = dp_txrx_dump_stats,
  7866. .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
  7867. .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
  7868. .txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
  7869. .txrx_intr_detach = dp_soc_interrupt_detach,
  7870. .set_pn_check = dp_set_pn_check_wifi3,
  7871. .update_config_parameters = dp_update_config_parameters,
  7872. /* TODO: Add other functions */
  7873. .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
  7874. .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
  7875. .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
  7876. .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
  7877. .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
  7878. .txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
  7879. .txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
  7880. .tx_send = dp_tx_send,
  7881. .txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
  7882. .txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
  7883. .txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
  7884. .txrx_peer_map_attach = dp_peer_map_attach_wifi3,
  7885. .txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
  7886. .txrx_get_os_rx_handles_from_vdev =
  7887. dp_get_os_rx_handles_from_vdev_wifi3,
  7888. .delba_tx_completion = dp_delba_tx_completion_wifi3,
  7889. .get_dp_capabilities = dp_get_cfg_capabilities,
  7890. .txrx_get_cfg = dp_get_cfg,
  7891. };
  7892. static struct cdp_ctrl_ops dp_ops_ctrl = {
  7893. .txrx_peer_authorize = dp_peer_authorize,
  7894. .txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
  7895. .txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
  7896. #ifdef MESH_MODE_SUPPORT
  7897. .txrx_set_mesh_mode = dp_peer_set_mesh_mode,
  7898. .txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
  7899. #endif
  7900. .txrx_set_vdev_param = dp_set_vdev_param,
  7901. .txrx_peer_set_nawds = dp_peer_set_nawds,
  7902. .txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
  7903. .txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
  7904. .txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
  7905. .txrx_update_filter_neighbour_peers =
  7906. dp_update_filter_neighbour_peers,
  7907. .txrx_get_sec_type = dp_get_sec_type,
  7908. /* TODO: Add other functions */
  7909. .txrx_wdi_event_sub = dp_wdi_event_sub,
  7910. .txrx_wdi_event_unsub = dp_wdi_event_unsub,
  7911. #ifdef WDI_EVENT_ENABLE
  7912. .txrx_get_pldev = dp_get_pldev,
  7913. #endif
  7914. .txrx_set_pdev_param = dp_set_pdev_param,
  7915. #ifdef ATH_SUPPORT_NAC_RSSI
  7916. .txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
  7917. .txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
  7918. #endif
  7919. .set_key = dp_set_michael_key,
  7920. .txrx_get_vdev_param = dp_get_vdev_param,
  7921. .enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
  7922. };
  7923. static struct cdp_me_ops dp_ops_me = {
  7924. #ifdef ATH_SUPPORT_IQUE
  7925. .tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
  7926. .tx_me_free_descriptor = dp_tx_me_free_descriptor,
  7927. .tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
  7928. #endif
  7929. .tx_me_find_ast_entry = NULL,
  7930. };
  7931. static struct cdp_mon_ops dp_ops_mon = {
  7932. .txrx_monitor_set_filter_ucast_data = NULL,
  7933. .txrx_monitor_set_filter_mcast_data = NULL,
  7934. .txrx_monitor_set_filter_non_data = NULL,
  7935. .txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
  7936. .txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
  7937. .txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
  7938. .txrx_reset_monitor_mode = dp_reset_monitor_mode,
  7939. /* Added support for HK advance filter */
  7940. .txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
  7941. };
  7942. static struct cdp_host_stats_ops dp_ops_host_stats = {
  7943. .txrx_per_peer_stats = dp_get_host_peer_stats,
  7944. .get_fw_peer_stats = dp_get_fw_peer_stats,
  7945. .get_htt_stats = dp_get_htt_stats,
  7946. .txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
  7947. .txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
  7948. .txrx_stats_publish = dp_txrx_stats_publish,
  7949. .txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
  7950. .txrx_get_peer_stats = dp_txrx_get_peer_stats,
  7951. .txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
  7952. .txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
  7953. /* TODO */
  7954. };
  7955. static struct cdp_raw_ops dp_ops_raw = {
  7956. /* TODO */
  7957. };
  7958. #ifdef CONFIG_WIN
  7959. static struct cdp_pflow_ops dp_ops_pflow = {
  7960. /* TODO */
  7961. };
  7962. #endif /* CONFIG_WIN */
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
 * @opaque_pdev: DP pdev context
 *
 * DP is ready to runtime suspend if there are no pending TX packets.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	/* Abort if there are any pending TX packets */
	if (dp_get_tx_pending(opaque_pdev) > 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Abort suspend due to pending TX packets"));
		return QDF_STATUS_E_AGAIN;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_runtime_resume() - ensure DP is ready to runtime resume
 * @opaque_pdev: DP pdev context
 *
 * Resume DP for runtime PM.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;
	void *hal_srng;
	int i;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		hal_srng = soc->tcl_data_ring[i].hal_srng;
		if (hal_srng) {
			/* We actually only need to acquire the lock */
			hal_srng_access_start(soc->hal_soc, hal_srng);
			/* Update SRC ring head pointer for HW to send
			 * all pending packets
			 */
			hal_srng_access_end(soc->hal_soc, hal_srng);
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_RUNTIME_PM */
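
/*
 * Illustrative sketch (not part of this file): dp_runtime_suspend() returns
 * QDF_STATUS_E_AGAIN while TX completions are outstanding, so a bus/PM layer
 * is expected to retry or abort the runtime-suspend attempt. The caller shape
 * and function name below are hypothetical and only show the expected
 * contract.
 */
#if 0
static int example_try_runtime_suspend(struct cdp_pdev *pdev)
{
	QDF_STATUS status = dp_runtime_suspend(pdev);

	if (status == QDF_STATUS_E_AGAIN)
		return -EBUSY;	/* PM core retries runtime suspend later */

	return (status == QDF_STATUS_SUCCESS) ? 0 : -EINVAL;
}
#endif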
static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
	struct dp_soc *soc = pdev->soc;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	return QDF_STATUS_SUCCESS;
}
#ifndef CONFIG_WIN
static struct cdp_misc_ops dp_ops_misc = {
	.tx_non_std = dp_tx_non_std,
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
};

static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

#ifdef IPA_OFFLOAD
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level
};
#endif

static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume
};
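
/*
 * Illustrative sketch (assumption, not upstream code): the converged cdp
 * layer reaches these handlers through the ops table registered below in
 * dp_txrx_ops, i.e. soc->ops->bus_ops->bus_suspend(pdev). The wrapper name
 * used here is hypothetical; it only shows the dispatch-with-NULL-guard
 * shape.
 */
#if 0
static QDF_STATUS example_cdp_bus_suspend(struct cdp_soc_t *soc,
					  struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops || !soc->ops->bus_ops ||
	    !soc->ops->bus_ops->bus_suspend)
		return QDF_STATUS_E_INVAL;

	return soc->ops->bus_ops->bus_suspend(pdev);
}
#endif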
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
/*
 * dp_peer_get_ref_find_by_addr - find peer by addr, incrementing its ref count
 * @dev: physical device instance
 * @peer_mac_addr: peer MAC address
 * @local_id: local ID for the peer
 * @debug_id: to track enum peer access
 *
 * Return: peer instance pointer
 */
static inline void *
dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
			     uint8_t *local_id,
			     enum peer_debug_id_type debug_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	/* The hash find takes a reference that the caller must release */
	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
	if (!peer)
		return NULL;

	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	return peer;
}

/*
 * dp_peer_release_ref - release peer ref count
 * @peer: peer handle
 * @debug_id: to track enum peer access
 *
 * Return: None
 */
static inline
void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
{
	dp_peer_unref_delete(peer);
}
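
/*
 * Illustrative sketch (hypothetical caller): every successful
 * dp_peer_get_ref_find_by_addr() must be balanced by dp_peer_release_ref(),
 * otherwise the peer object can never be freed. The debug_id value used here
 * is assumed for illustration.
 */
#if 0
static void example_peer_ref_usage(struct cdp_pdev *pdev, uint8_t *mac)
{
	uint8_t local_id;
	void *peer;

	peer = dp_peer_get_ref_find_by_addr(pdev, mac, &local_id,
					    PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return;

	/* ... use the peer while the reference is held ... */

	dp_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
}
#endif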
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
	.peer_release_ref = dp_peer_release_ref,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
};
#endif
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
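
/*
 * Illustrative sketch (assumption): converged callers do not invoke the dp_*
 * handlers directly; they dispatch through this table, guarding each sub-ops
 * pointer and member before the call. The wrapper name below is hypothetical
 * and the op chosen is only an example.
 */
#if 0
static QDF_STATUS example_reset_monitor(struct cdp_soc_t *soc,
					struct cdp_pdev *pdev)
{
	if (!soc->ops->mon_ops || !soc->ops->mon_ops->txrx_reset_monitor_mode)
		return QDF_STATUS_E_NOSUPPORT;

	return soc->ops->mon_ops->txrx_reset_monitor_mode(pdev);
}
#endif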
/*
 * dp_soc_set_txrx_ring_map() - set the default TX ring map per interrupt ctx
 * @soc: DP SoC handle
 *
 * Return: None
 */
static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
{
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
}
#ifdef QCA_WIFI_QCA8074
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Attach and fully initialize the SOC in one step.
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);
	if (!dp_soc)
		return NULL;

	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
		return NULL;

	return (void *)dp_soc;
}
#else
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Attach only; SOC initialization is deferred to dp_soc_init_wifi3().
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);

	return (void *)dp_soc;
}
#endif
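
/*
 * Illustrative sketch (hypothetical glue code): with QCA_MEM_ATTACH_ON_WIFI3
 * set, memory attach and HW initialization are split, so a platform driver
 * would call dp_soc_attach_wifi3() early and dp_soc_init_wifi3() once HIF is
 * up; otherwise a single dp_soc_attach_wifi3() call does both.
 */
#if 0
void *soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
				qdf_osdev, ol_ops, device_id);

#ifdef QCA_MEM_ATTACH_ON_WIFI3
if (soc)
	soc = dp_soc_init_wifi3(soc, ctrl_psoc, hif_handle, htc_handle,
				qdf_osdev, ol_ops, device_id);
#endif
#endif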
/**
 * dp_soc_attach() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *soc = NULL;
	struct htt_soc *htt_soc = NULL;

	soc = qdf_mem_malloc(sizeof(*soc));
	if (!soc) {
		dp_err("DP SOC memory allocation failed");
		goto fail0;
	}

	soc->device_id = device_id;
	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_soc_attach failed");
		goto fail1;
	}

	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
	if (!htt_soc) {
		dp_err("HTT attach failed");
		goto fail1;
	}
	soc->htt_handle = htt_soc;
	htt_soc->dp_soc = soc;
	htt_soc->htc_soc = htc_handle;

	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	return soc;
fail2:
	qdf_mem_free(htt_soc);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
/**
 * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)dpsoc;
	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;

	htt_soc->htc_soc = htc_handle;
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		return NULL;

	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
			   soc->hal_soc, soc->osdev);

	/* Apply per-target capabilities and ring-size overrides */
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		break;
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
#endif
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
								CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL)
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
							    CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);
	qdf_spinlock_create(&soc->ast_lock);
	dp_soc_wds_attach(soc);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map */
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return soc;
}
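
/*
 * Illustrative sketch (hypothetical helper): ol_ops callbacks are supplied by
 * the control plane and may be absent, so dp_soc_init() NULL-checks each one
 * (see the get_dp_cfg_param guard above) before invoking it. Any new callback
 * use should follow the same pattern; the enum type here is assumed from the
 * CDP_CFG_* usage above.
 */
#if 0
static int example_get_cfg(struct dp_soc *soc, enum cdp_cfg_param_type param)
{
	if (!soc->cdp_soc.ol_ops->get_dp_cfg_param)
		return -EINVAL;	/* callback not registered */

	return soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc, param);
}
#endif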
/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (unused)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (unused)
 * @ol_ops: Offload Operations (unused)
 * @device_id: Device ID (unused)
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	return dp_soc_init(dpsoc, htc_handle, hif_handle);
}
#endif
/*
 * dp_get_pdev_for_mac_id() - Return the pdev corresponding to a MAC id
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to the MAC id
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return soc->pdev_list[mac_id];

	/* Typically for MCL, as there is only one pdev */
	return soc->pdev_list[0];
}
/*
 * dp_is_hw_dbs_enable() - Check if DBS is supported and cap the MAC ring count
 * @soc: DP SoC context
 * @max_mac_rings: in/out number of MAC rings; reduced to 1 if DBS is disabled
 *
 * Return: None
 */
static
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings)
{
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
			is_hw_dbs_2x2_capable(soc->ctrl_psoc);

	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
}
/*
 * dp_set_pktlog_wifi3() - enable/disable pktlog collection for a WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not (true or false)
 *
 * Return: 0 on success
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	soc = pdev->soc;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);
	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE,
					 &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE_PKTLOG_LITE,
					 &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				for (mac_id = 0; mac_id < max_mac_rings;
				     mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE,
					 &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			/* Pass value 0 to disable
			 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW; once
			 * proper macros are defined in the HTT header file,
			 * they should be used here instead.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}

	return 0;
}
#endif
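
/*
 * Illustrative sketch (hypothetical caller): the WDI layer toggles pktlog
 * collection per event type, e.g. subscribing to lite RX stats and later
 * unsubscribing. Events other than the WDI_EVENT_* IDs handled above fall
 * through as no-ops.
 */
#if 0
/* enable lite RX pktlog (status-ring TLVs only) */
dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);

/* ... collect packet logs ... */

/* disable lite RX pktlog and stop the monitor reap timer */
dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
#endif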