dp_main.c

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
    return;
}
#endif
#include "dp_ipa.h"
#ifdef CONFIG_MCL
static void dp_service_mon_rings(void *arg);
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn);
#endif
#endif
static void dp_pktlogmod_exit(struct dp_pdev *handle);
#define DP_INTR_POLL_TIMER_MS 10
#define DP_WDS_AGING_TIMER_DEFAULT_MS 120000
#define DP_MCS_LENGTH (6*MAX_MCS)
#define DP_NSS_LENGTH (6*SS_COUNT)
#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
#define DP_MAX_MCS_STRING_LEN 30
#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL 0xb
#define RX_RING_MASK_VAL 0x7
#else
#define TX_RING_MASK_VAL 0xF
#define RX_RING_MASK_VAL 0xF
#endif
bool rx_hash = 1;
qdf_declare_param(rx_hash, bool);
#define STR_MAXLEN 64
#define DP_PPDU_STATS_CFG_ALL 0xffff
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP     TID    AC
 * 000000   0      WME_AC_BE
 * 001000   1      WME_AC_BK
 * 010000   1      WME_AC_BK
 * 011000   0      WME_AC_BE
 * 100000   5      WME_AC_VI
 * 101000   5      WME_AC_VI
 * 110000   6      WME_AC_VO
 * 111000   6      WME_AC_VO
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
    0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    0, 0, 0, 0, 0, 0, 0, 0,
    5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6,
};
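/*
 * Example (illustrative only, not part of the driver): the DSCP codepoint is
 * used directly as the index into the map above, so DSCP 46 (Expedited
 * Forwarding, 101110b) falls in the 101xxx block and resolves to TID 5,
 * i.e. WME_AC_VI per the table header:
 *
 *     uint8_t tid = default_dscp_tid_map[46];    // tid == 5
 */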
/*
 * struct dp_rate_debug
 *
 * @mcs_type: print string for a given mcs
 * @valid: valid mcs rate?
 */
struct dp_rate_debug {
    char mcs_type[DP_MAX_MCS_STRING_LEN];
    uint8_t valid;
};
#define MCS_VALID 1
#define MCS_INVALID 0
static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
    {
        {"CCK 11 Mbps Long ", MCS_VALID},
        {"CCK 5.5 Mbps Long ", MCS_VALID},
        {"CCK 2 Mbps Long ", MCS_VALID},
        {"CCK 1 Mbps Long ", MCS_VALID},
        {"CCK 11 Mbps Short ", MCS_VALID},
        {"CCK 5.5 Mbps Short", MCS_VALID},
        {"CCK 2 Mbps Short ", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"OFDM 48 Mbps", MCS_VALID},
        {"OFDM 24 Mbps", MCS_VALID},
        {"OFDM 12 Mbps", MCS_VALID},
        {"OFDM 6 Mbps ", MCS_VALID},
        {"OFDM 54 Mbps", MCS_VALID},
        {"OFDM 36 Mbps", MCS_VALID},
        {"OFDM 18 Mbps", MCS_VALID},
        {"OFDM 9 Mbps ", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"HT MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"HT MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"HT MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
        {"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
        {"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
        {"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
        {"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_INVALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"VHT MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"VHT MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"VHT MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"VHT MCS 3 (16-QAM 1/2) ", MCS_VALID},
        {"VHT MCS 4 (16-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 5 (64-QAM 2/3) ", MCS_VALID},
        {"VHT MCS 6 (64-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 7 (64-QAM 5/6) ", MCS_VALID},
        {"VHT MCS 8 (256-QAM 3/4) ", MCS_VALID},
        {"VHT MCS 9 (256-QAM 5/6) ", MCS_VALID},
        {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
        {"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_VALID},
    },
    {
        {"HE MCS 0 (BPSK 1/2) ", MCS_VALID},
        {"HE MCS 1 (QPSK 1/2) ", MCS_VALID},
        {"HE MCS 2 (QPSK 3/4) ", MCS_VALID},
        {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID},
        {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID},
        {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID},
        {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID},
        {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID},
        {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID},
        {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID},
        {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
        {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
        {"INVALID ", MCS_VALID},
    }
};
/**
 * @brief Cpu ring map types
 */
enum dp_cpu_ring_map_types {
    DP_DEFAULT_MAP,
    DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
    DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
    DP_NSS_ALL_RADIO_OFFLOADED_MAP,
    DP_CPU_RING_MAP_MAX
};
/**
 * @brief Cpu to tx ring map
 */
static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
    {0x0, 0x1, 0x2, 0x0},
    {0x1, 0x2, 0x1, 0x2},
    {0x0, 0x2, 0x0, 0x2},
    {0x2, 0x2, 0x2, 0x2}
};
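/*
 * Example (illustrative only): each row above is indexed by interrupt
 * context, so the Tx ring used by context 1 under the default map would be
 * read roughly as
 *
 *     uint8_t tx_ring = dp_cpu_ring_map[DP_DEFAULT_MAP][1];    // 0x1
 *
 * The DP_NSS_FIRST_RADIO_OFFLOADED_MAP row appears to avoid ring 0x0
 * entirely, leaving that ring to the offloaded radio.
 */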
/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
    STATS_FW = 0,
    STATS_HOST = 1,
    STATS_TYPE_MAX = 2,
};
/**
 * @brief General Firmware statistics options
 *
 */
enum dp_fw_stats {
    TXRX_FW_STATS_INVALID = -1,
};
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
    {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
    /* Last ENUM for HTT FW STATS */
    {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
};
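/*
 * Example of reading the table (illustrative only): a stats request id acts
 * as the row index, column STATS_FW holds the HTT debug stats id sent to
 * firmware and column STATS_HOST holds the host stats id. A row such as
 * {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS} therefore denotes a host-only
 * request; a lookup would look roughly like
 *
 *     int fw_id   = dp_stats_mapping_table[req][STATS_FW];
 *     int host_id = dp_stats_mapping_table[req][STATS_HOST];
 */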
/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields correspond
 * to ring numbers. We are finding which interrupt group a ring belongs to.
 *
 * Return: the index in the grp_mask array with the ring number.
 * -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
{
    int ext_group_num;
    int mask = 1 << ring_num;

    for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
            ext_group_num++) {
        if (mask & grp_mask[ext_group_num])
            return ext_group_num;
    }
    return -QDF_STATUS_E_NOENT;
}
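/*
 * Example (illustrative only, assuming four interrupt contexts): with
 * grp_mask = {0x3, 0xC, 0x0, 0x0}, ring 2 has bit (1 << 2) set only in
 * group 1, so dp_srng_find_ring_in_mask(2, grp_mask) returns 1; a ring whose
 * bit is set in no group yields -QDF_STATUS_E_NOENT.
 */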
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
        enum hal_ring_type ring_type,
        int ring_num)
{
    int *grp_mask;

    switch (ring_type) {
    case WBM2SW_RELEASE:
        /* dp_tx_comp_handler - soc->tx_comp_ring */
        if (ring_num < 3)
            grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
        /* dp_rx_wbm_err_process - soc->rx_rel_ring */
        else if (ring_num == 3) {
            /* sw treats this as a separate ring type */
            grp_mask = &soc->wlan_cfg_ctx->
                int_rx_wbm_rel_ring_mask[0];
            ring_num = 0;
        } else {
            qdf_assert(0);
            return -QDF_STATUS_E_NOENT;
        }
        break;
    case REO_EXCEPTION:
        /* dp_rx_err_process - &soc->reo_exception_ring */
        grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
        break;
    case REO_DST:
        /* dp_rx_process - soc->reo_dest_ring */
        grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
        break;
    case REO_STATUS:
        /* dp_reo_status_ring_handler - soc->reo_status_ring */
        grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
        break;
    /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
    case RXDMA_MONITOR_STATUS:
    /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
    case RXDMA_MONITOR_DST:
        /* dp_mon_process */
        grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
        break;
    case RXDMA_DST:
        /* dp_rxdma_err_process */
        grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
        break;
    case RXDMA_BUF:
        grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
        break;
    case RXDMA_MONITOR_BUF:
        /* TODO: support low_thresh interrupt */
        return -QDF_STATUS_E_NOENT;
        break;
    case TCL_DATA:
    case TCL_CMD:
    case REO_CMD:
    case SW2WBM_RELEASE:
    case WBM_IDLE_LINK:
        /* normally empty SW_TO_HW rings */
        return -QDF_STATUS_E_NOENT;
        break;
    case TCL_STATUS:
    case REO_REINJECT:
        /* misc unused rings */
        return -QDF_STATUS_E_NOENT;
        break;
    case CE_SRC:
    case CE_DST:
    case CE_DST_STATUS:
        /* CE_rings - currently handled by hif */
    default:
        return -QDF_STATUS_E_NOENT;
        break;
    }
    return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
        *ring_params, int ring_type, int ring_num)
{
    int msi_group_number;
    int msi_data_count;
    int ret;
    uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

    ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
            &msi_data_count, &msi_data_start,
            &msi_irq_start);
    if (ret)
        return;

    msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
            ring_num);
    if (msi_group_number < 0) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
            FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
            ring_type, ring_num);
        ring_params->msi_addr = 0;
        ring_params->msi_data = 0;
        return;
    }

    if (msi_group_number > msi_data_count) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
            FL("2 msi_groups will share an msi; msi_group_num %d"),
            msi_group_number);
        QDF_ASSERT(0);
    }

    pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

    ring_params->msi_addr = addr_low;
    ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
    ring_params->msi_data = (msi_group_number % msi_data_count)
        + msi_data_start;
    ring_params->flags |= HAL_SRNG_MSI_INTR;
}
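/*
 * Example (illustrative only): with msi_data_count = 3 and
 * msi_data_start = 1, a ring mapped to ext group 4 gets
 * msi_data = (4 % 3) + 1 = 2, i.e. groups beyond the available MSI vectors
 * wrap around and share a vector (the warning above fires once
 * msi_group_number exceeds msi_data_count). The 64-bit msi_addr is assembled
 * from the low/high words returned by pld_get_msi_address().
 */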
/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Return: void
 */
#ifdef FEATURE_WDS
static void dp_print_ast_stats(struct dp_soc *soc)
{
    uint8_t i;
    uint8_t num_entries = 0;
    struct dp_vdev *vdev;
    struct dp_pdev *pdev;
    struct dp_peer *peer;
    struct dp_ast_entry *ase, *tmp_ase;

    DP_PRINT_STATS("AST Stats:");
    DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
    DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
    DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
    DP_PRINT_STATS("AST Table:");
    for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
        pdev = soc->pdev_list[i];
        DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
            DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
                DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
                    DP_PRINT_STATS("%6d mac_addr = %pM"
                            " peer_mac_addr = %pM"
                            " type = %d"
                            " next_hop = %d"
                            " is_active = %d"
                            " is_bss = %d",
                            ++num_entries,
                            ase->mac_addr.raw,
                            ase->peer->mac_addr.raw,
                            ase->type,
                            ase->next_hop,
                            ase->is_active,
                            ase->is_bss);
                }
            }
        }
    }
}
#else
static void dp_print_ast_stats(struct dp_soc *soc)
{
    DP_PRINT_STATS("AST Stats not available. Enable FEATURE_WDS");
    return;
}
#endif
/*
 * dp_srng_setup() - Internal function to setup SRNG rings used by data path
 */
static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
        int ring_type, int ring_num, int mac_id, uint32_t num_entries)
{
    void *hal_soc = soc->hal_soc;
    uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
    /* TODO: See if we should get align size from hal */
    uint32_t ring_base_align = 8;
    struct hal_srng_params ring_params;
    uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

    /* TODO: Currently hal layer takes care of endianness related settings.
     * See if these settings need to be passed from DP layer
     */
    ring_params.flags = 0;
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
            FL("Ring type: %d, num:%d"), ring_type, ring_num);

    num_entries = (num_entries > max_entries) ? max_entries : num_entries;
    srng->hal_srng = NULL;
    srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
    srng->num_entries = num_entries;
    srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
        soc->osdev, soc->osdev->dev, srng->alloc_size,
        &(srng->base_paddr_unaligned));

    if (!srng->base_vaddr_unaligned) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
            FL("alloc failed - ring_type: %d, ring_num %d"),
            ring_type, ring_num);
        return QDF_STATUS_E_NOMEM;
    }

    ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
        ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
    ring_params.ring_base_paddr = srng->base_paddr_unaligned +
        ((unsigned long)(ring_params.ring_base_vaddr) -
        (unsigned long)srng->base_vaddr_unaligned);
    ring_params.num_entries = num_entries;

    if (soc->intr_mode == DP_INTR_MSI) {
        dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
            FL("Using MSI for ring_type: %d, ring_num %d"),
            ring_type, ring_num);
    } else {
        ring_params.msi_data = 0;
        ring_params.msi_addr = 0;
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
            FL("Skipping MSI for ring_type: %d, ring_num %d"),
            ring_type, ring_num);
    }

    /*
     * Setup interrupt timer and batch counter thresholds for
     * interrupt mitigation based on ring type
     */
    if (ring_type == REO_DST) {
        ring_params.intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
        ring_params.intr_batch_cntr_thres_entries =
            wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
    } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
        ring_params.intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
        ring_params.intr_batch_cntr_thres_entries =
            wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
    } else {
        ring_params.intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
        ring_params.intr_batch_cntr_thres_entries =
            wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
    }

    /* Enable low threshold interrupts for rx buffer rings (regular and
     * monitor buffer rings).
     * TODO: See if this is required for any other ring
     */
    if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF)) {
        /* TODO: Setting low threshold to 1/8th of ring size
         * see if this needs to be configurable
         */
        ring_params.low_threshold = num_entries >> 3;
        ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
        ring_params.intr_timer_thres_us = 0x1000;
    }

    srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
        mac_id, &ring_params);

    if (!srng->hal_srng) {
        qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
                srng->alloc_size,
                srng->base_vaddr_unaligned,
                srng->base_paddr_unaligned, 0);
    }

    return 0;
}
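/*
 * Example of how callers later in this file would use this helper
 * (illustrative only; ring sizes and ids depend on the target config):
 * setting up REO destination ring 0 with no per-pdev mac association might
 * look roughly like
 *
 *     dp_srng_setup(soc, &soc->reo_dest_ring[0], REO_DST, 0, 0,
 *                   reo_dst_ring_size);
 *
 * where reo_dst_ring_size stands in for the configured entry count.
 */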
/**
 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
 * Any buffers allocated and attached to ring entries are expected to be freed
 * before calling this function.
 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
        int ring_type, int ring_num)
{
    if (!srng->hal_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
            FL("Ring type: %d, num:%d not setup"),
            ring_type, ring_num);
        return;
    }

    hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
    qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
            srng->alloc_size,
            srng->base_vaddr_unaligned,
            srng->base_paddr_unaligned, 0);
    srng->hal_srng = NULL;
}
/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(void *hif_handle);

/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP SOC handle
 * @dp_budget: Number of frames/descriptors that can be processed in one shot
 *
 * Return: amount of the budget consumed (total work done) for the soc device
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
    struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
    struct dp_soc *soc = int_ctx->soc;
    int ring = 0;
    uint32_t work_done = 0;
    int budget = dp_budget;
    uint8_t tx_mask = int_ctx->tx_ring_mask;
    uint8_t rx_mask = int_ctx->rx_ring_mask;
    uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
    uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
    uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
    uint32_t remaining_quota = dp_budget;
    struct dp_pdev *pdev = NULL;

    /* Process Tx completion interrupts first to return back buffers */
    while (tx_mask) {
        if (tx_mask & 0x1) {
            work_done = dp_tx_comp_handler(soc,
                    soc->tx_comp_ring[ring].hal_srng,
                    remaining_quota);
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                "tx mask 0x%x ring %d, budget %d, work_done %d",
                tx_mask, ring, budget, work_done);
            budget -= work_done;
            if (budget <= 0)
                goto budget_done;
            remaining_quota = budget;
        }
        tx_mask = tx_mask >> 1;
        ring++;
    }

    /* Process REO Exception ring interrupt */
    if (rx_err_mask) {
        work_done = dp_rx_err_process(soc,
                soc->reo_exception_ring.hal_srng,
                remaining_quota);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            "REO Exception Ring: work_done %d budget %d",
            work_done, budget);
        budget -= work_done;
        if (budget <= 0) {
            goto budget_done;
        }
        remaining_quota = budget;
    }

    /* Process Rx WBM release ring interrupt */
    if (rx_wbm_rel_mask) {
        work_done = dp_rx_wbm_err_process(soc,
                soc->rx_rel_ring.hal_srng, remaining_quota);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            "WBM Release Ring: work_done %d budget %d",
            work_done, budget);
        budget -= work_done;
        if (budget <= 0) {
            goto budget_done;
        }
        remaining_quota = budget;
    }

    /* Process Rx interrupts */
    if (rx_mask) {
        for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
            if (rx_mask & (1 << ring)) {
                work_done = dp_rx_process(int_ctx,
                        soc->reo_dest_ring[ring].hal_srng,
                        remaining_quota);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                    "rx mask 0x%x ring %d, work_done %d budget %d",
                    rx_mask, ring, work_done, budget);
                budget -= work_done;
                if (budget <= 0)
                    goto budget_done;
                remaining_quota = budget;
            }
        }
        for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
            /* Need to check why this is required */
            work_done = dp_rxdma_err_process(soc, ring,
                    remaining_quota);
            budget -= work_done;
        }
    }

    if (reo_status_mask)
        dp_reo_status_ring_handler(soc);

    /* Process LMAC interrupts */
    for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
        pdev = soc->pdev_list[ring];
        if (pdev == NULL)
            continue;
        if (int_ctx->rx_mon_ring_mask & (1 << ring)) {
            work_done = dp_mon_process(soc, ring, remaining_quota);
            budget -= work_done;
            if (budget <= 0)
                goto budget_done;
            remaining_quota = budget;
        }
        if (int_ctx->rxdma2host_ring_mask & (1 << ring)) {
            work_done = dp_rxdma_err_process(soc, ring,
                    remaining_quota);
            budget -= work_done;
            if (budget <= 0)
                goto budget_done;
            remaining_quota = budget;
        }
        if (int_ctx->host2rxdma_ring_mask & (1 << ring)) {
            union dp_rx_desc_list_elem_t *desc_list = NULL;
            union dp_rx_desc_list_elem_t *tail = NULL;
            struct dp_srng *rx_refill_buf_ring =
                &pdev->rx_refill_buf_ring;

            DP_STATS_INC(pdev, replenish.low_thresh_intrs, 1);
            dp_rx_buffers_replenish(soc, ring,
                    rx_refill_buf_ring,
                    &soc->rx_desc_buf[ring], 0,
                    &desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
        }
    }

    qdf_lro_flush(int_ctx->lro_ctx);

budget_done:
    return dp_budget - budget;
}
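/*
 * Example (illustrative only): the return value is the consumed quota. If
 * dp_budget is 64 and the Tx completion and one REO destination ring report
 * work_done of 10 and 20 respectively, budget drops to 34 and the function
 * returns 64 - 34 = 30, leaving the caller (NAPI context or poll timer) to
 * decide whether to reschedule.
 */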
#ifdef DP_INTR_POLL_BASED
/*
 * dp_interrupt_timer() - timer poll for interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_interrupt_timer(void *arg)
{
    struct dp_soc *soc = (struct dp_soc *) arg;
    int i;

    if (qdf_atomic_read(&soc->cmn_init_done)) {
        for (i = 0;
            i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
            dp_service_srngs(&soc->intr_ctx[i], 0xffff);

        qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
    }
}

/*
 * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
{
    struct dp_soc *soc = (struct dp_soc *)txrx_soc;
    int i;

    soc->intr_mode = DP_INTR_POLL;

    for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
        soc->intr_ctx[i].dp_intr_id = i;
        soc->intr_ctx[i].tx_ring_mask =
            wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
        soc->intr_ctx[i].rx_ring_mask =
            wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
        soc->intr_ctx[i].rx_mon_ring_mask =
            wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
        soc->intr_ctx[i].rx_err_ring_mask =
            wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
        soc->intr_ctx[i].rx_wbm_rel_ring_mask =
            wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
        soc->intr_ctx[i].reo_status_ring_mask =
            wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
        soc->intr_ctx[i].rxdma2host_ring_mask =
            wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
        soc->intr_ctx[i].soc = soc;
        soc->intr_ctx[i].lro_ctx = qdf_lro_init();
    }

    qdf_timer_init(soc->osdev, &soc->int_timer,
            dp_interrupt_timer, (void *)soc,
            QDF_TIMER_TYPE_WAKE_APPS);

    return QDF_STATUS_SUCCESS;
}

#if defined(CONFIG_MCL)
extern int con_mode_monitor;
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
    struct dp_soc *soc = (struct dp_soc *)txrx_soc;

    if (!(soc->wlan_cfg_ctx->napi_enabled) ||
            con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            "%s: Poll mode", __func__);
        return dp_soc_interrupt_attach_poll(txrx_soc);
    } else {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            "%s: Interrupt mode", __func__);
        return dp_soc_interrupt_attach(txrx_soc);
    }
}
#else
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
    return dp_soc_interrupt_attach_poll(txrx_soc);
}
#endif
#endif
  774. static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
  775. int intr_ctx_num, int *irq_id_map, int *num_irq_r)
  776. {
  777. int j;
  778. int num_irq = 0;
  779. int tx_mask =
  780. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  781. int rx_mask =
  782. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  783. int rx_mon_mask =
  784. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
  785. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  786. soc->wlan_cfg_ctx, intr_ctx_num);
  787. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  788. soc->wlan_cfg_ctx, intr_ctx_num);
  789. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  790. soc->wlan_cfg_ctx, intr_ctx_num);
  791. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  792. soc->wlan_cfg_ctx, intr_ctx_num);
  793. int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
  794. soc->wlan_cfg_ctx, intr_ctx_num);
  795. for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
  796. if (tx_mask & (1 << j)) {
  797. irq_id_map[num_irq++] =
  798. (wbm2host_tx_completions_ring1 - j);
  799. }
  800. if (rx_mask & (1 << j)) {
  801. irq_id_map[num_irq++] =
  802. (reo2host_destination_ring1 - j);
  803. }
  804. if (rxdma2host_ring_mask & (1 << j)) {
  805. irq_id_map[num_irq++] =
  806. rxdma2host_destination_ring_mac1 -
  807. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  808. }
  809. if (host2rxdma_ring_mask & (1 << j)) {
  810. irq_id_map[num_irq++] =
  811. host2rxdma_host_buf_ring_mac1 -
  812. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  813. }
  814. if (rx_mon_mask & (1 << j)) {
  815. irq_id_map[num_irq++] =
  816. ppdu_end_interrupts_mac1 -
  817. wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  818. }
  819. if (rx_wbm_rel_ring_mask & (1 << j))
  820. irq_id_map[num_irq++] = wbm2host_rx_release;
  821. if (rx_err_ring_mask & (1 << j))
  822. irq_id_map[num_irq++] = reo2host_exception;
  823. if (reo_status_ring_mask & (1 << j))
  824. irq_id_map[num_irq++] = reo2host_status;
  825. }
  826. *num_irq_r = num_irq;
  827. }
  828. static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
  829. int intr_ctx_num, int *irq_id_map, int *num_irq_r,
  830. int msi_vector_count, int msi_vector_start)
  831. {
  832. int tx_mask = wlan_cfg_get_tx_ring_mask(
  833. soc->wlan_cfg_ctx, intr_ctx_num);
  834. int rx_mask = wlan_cfg_get_rx_ring_mask(
  835. soc->wlan_cfg_ctx, intr_ctx_num);
  836. int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
  837. soc->wlan_cfg_ctx, intr_ctx_num);
  838. int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
  839. soc->wlan_cfg_ctx, intr_ctx_num);
  840. int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
  841. soc->wlan_cfg_ctx, intr_ctx_num);
  842. int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
  843. soc->wlan_cfg_ctx, intr_ctx_num);
  844. int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
  845. soc->wlan_cfg_ctx, intr_ctx_num);
  846. unsigned int vector =
  847. (intr_ctx_num % msi_vector_count) + msi_vector_start;
  848. int num_irq = 0;
  849. soc->intr_mode = DP_INTR_MSI;
  850. if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
  851. rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
  852. irq_id_map[num_irq++] =
  853. pld_get_msi_irq(soc->osdev->dev, vector);
  854. *num_irq_r = num_irq;
  855. }
  856. static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
  857. int *irq_id_map, int *num_irq)
  858. {
  859. int msi_vector_count, ret;
  860. uint32_t msi_base_data, msi_vector_start;
  861. ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
  862. &msi_vector_count,
  863. &msi_base_data,
  864. &msi_vector_start);
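/*
 * A non-zero return from pld_get_user_msi_assignment() is taken to mean
 * that no MSI vectors are assigned to the "DP" user, so fall back to the
 * integrated (legacy) interrupt map; otherwise derive the map from the
 * assigned MSI vectors.
 */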
  865. if (ret)
  866. return dp_soc_interrupt_map_calculate_integrated(soc,
  867. intr_ctx_num, irq_id_map, num_irq);
  868. else
  869. dp_soc_interrupt_map_calculate_msi(soc,
  870. intr_ctx_num, irq_id_map, num_irq,
  871. msi_vector_count, msi_vector_start);
  872. }
  873. /*
  874. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  875. * @txrx_soc: DP SOC handle
  876. *
877. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" NAPI
878. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
879. * rx_mon_ring_mask to indicate the rings that are processed by the handler.
880. *
881. * Return: 0 for success, nonzero for failure.
  882. */
  883. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  884. {
  885. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  886. int i = 0;
  887. int num_irq = 0;
  888. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  889. int ret = 0;
  890. /* Map of IRQ ids registered with one interrupt context */
  891. int irq_id_map[HIF_MAX_GRP_IRQ];
  892. int tx_mask =
  893. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  894. int rx_mask =
  895. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  896. int rx_mon_mask =
  897. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
  898. int rx_err_ring_mask =
  899. wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
  900. int rx_wbm_rel_ring_mask =
  901. wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
  902. int reo_status_ring_mask =
  903. wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
  904. int rxdma2host_ring_mask =
  905. wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
  906. int host2rxdma_ring_mask =
  907. wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
  908. soc->intr_ctx[i].dp_intr_id = i;
  909. soc->intr_ctx[i].tx_ring_mask = tx_mask;
  910. soc->intr_ctx[i].rx_ring_mask = rx_mask;
  911. soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
  912. soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
  913. soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
  914. soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
  915. soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
  916. soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
  917. soc->intr_ctx[i].soc = soc;
  918. num_irq = 0;
  919. dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
  920. &num_irq);
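/*
 * Register the computed IRQ list with HIF as a single NAPI-serviced
 * ext group; dp_service_srngs() is invoked with this interrupt context
 * whenever any of the mapped interrupts fires.
 */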
  921. ret = hif_register_ext_group(soc->hif_handle,
  922. num_irq, irq_id_map, dp_service_srngs,
  923. &soc->intr_ctx[i], "dp_intr",
  924. HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
  925. if (ret) {
  926. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  927. FL("failed, ret = %d"), ret);
  928. return QDF_STATUS_E_FAILURE;
  929. }
  930. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  931. }
  932. hif_configure_ext_group_interrupts(soc->hif_handle);
  933. return QDF_STATUS_SUCCESS;
  934. }
  935. /*
936. * dp_soc_interrupt_detach() - Deregister interrupt handlers and free interrupt allocations
  937. * @txrx_soc: DP SOC handle
  938. *
  939. * Return: void
  940. */
  941. static void dp_soc_interrupt_detach(void *txrx_soc)
  942. {
  943. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  944. int i;
  945. if (soc->intr_mode == DP_INTR_POLL) {
  946. qdf_timer_stop(&soc->int_timer);
  947. qdf_timer_free(&soc->int_timer);
  948. } else {
  949. hif_deregister_exec_group(soc->hif_handle, "dp_intr");
  950. }
  951. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  952. soc->intr_ctx[i].tx_ring_mask = 0;
  953. soc->intr_ctx[i].rx_ring_mask = 0;
  954. soc->intr_ctx[i].rx_mon_ring_mask = 0;
  955. soc->intr_ctx[i].rx_err_ring_mask = 0;
  956. soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
  957. soc->intr_ctx[i].reo_status_ring_mask = 0;
  958. soc->intr_ctx[i].rxdma2host_ring_mask = 0;
  959. soc->intr_ctx[i].host2rxdma_ring_mask = 0;
  960. qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
  961. }
  962. }
  963. #define AVG_MAX_MPDUS_PER_TID 128
  964. #define AVG_TIDS_PER_CLIENT 2
  965. #define AVG_FLOWS_PER_TID 2
  966. #define AVG_MSDUS_PER_FLOW 128
  967. #define AVG_MSDUS_PER_MPDU 4
  968. /*
  969. * Allocate and setup link descriptor pool that will be used by HW for
  970. * various link and queue descriptors and managed by WBM
  971. */
  972. static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
  973. {
  974. int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
  975. int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
  976. uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
  977. uint32_t num_mpdus_per_link_desc =
  978. hal_num_mpdus_per_link_desc(soc->hal_soc);
  979. uint32_t num_msdus_per_link_desc =
  980. hal_num_msdus_per_link_desc(soc->hal_soc);
  981. uint32_t num_mpdu_links_per_queue_desc =
  982. hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
  983. uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
  984. uint32_t total_link_descs, total_mem_size;
  985. uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
  986. uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
  987. uint32_t num_link_desc_banks;
  988. uint32_t last_bank_size = 0;
  989. uint32_t entry_size, num_entries;
  990. int i;
  991. uint32_t desc_id = 0;
992. /* Only Tx queue descriptors are allocated from the common link descriptor
993. * pool. Rx queue descriptors (REO queue extension descriptors) are not
994. * included here because they are expected to be allocated contiguously
995. * with the REO queue descriptors.
996. */
  997. num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  998. AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
  999. num_mpdu_queue_descs = num_mpdu_link_descs /
  1000. num_mpdu_links_per_queue_desc;
  1001. num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1002. AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
  1003. num_msdus_per_link_desc;
  1004. num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  1005. AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
  1006. num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
  1007. num_tx_msdu_link_descs + num_rx_msdu_link_descs;
  1008. /* Round up to power of 2 */
  1009. total_link_descs = 1;
  1010. while (total_link_descs < num_entries)
  1011. total_link_descs <<= 1;
  1012. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1013. FL("total_link_descs: %u, link_desc_size: %d"),
  1014. total_link_descs, link_desc_size);
  1015. total_mem_size = total_link_descs * link_desc_size;
  1016. total_mem_size += link_desc_align;
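/*
 * If the whole pool fits in one consistent-memory allocation it is kept
 * as a single (last) bank; otherwise it is carved into banks of
 * max_alloc_size bytes, each reserving link_desc_align bytes of slack so
 * the usable region can be re-aligned, with any remainder going into a
 * smaller final bank.
 */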
  1017. if (total_mem_size <= max_alloc_size) {
  1018. num_link_desc_banks = 0;
  1019. last_bank_size = total_mem_size;
  1020. } else {
  1021. num_link_desc_banks = (total_mem_size) /
  1022. (max_alloc_size - link_desc_align);
  1023. last_bank_size = total_mem_size %
  1024. (max_alloc_size - link_desc_align);
  1025. }
  1026. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1027. FL("total_mem_size: %d, num_link_desc_banks: %u"),
  1028. total_mem_size, num_link_desc_banks);
  1029. for (i = 0; i < num_link_desc_banks; i++) {
  1030. soc->link_desc_banks[i].base_vaddr_unaligned =
  1031. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  1032. max_alloc_size,
  1033. &(soc->link_desc_banks[i].base_paddr_unaligned));
  1034. soc->link_desc_banks[i].size = max_alloc_size;
  1035. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
  1036. soc->link_desc_banks[i].base_vaddr_unaligned) +
  1037. ((unsigned long)(
  1038. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1039. link_desc_align));
  1040. soc->link_desc_banks[i].base_paddr = (unsigned long)(
  1041. soc->link_desc_banks[i].base_paddr_unaligned) +
  1042. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1043. (unsigned long)(
  1044. soc->link_desc_banks[i].base_vaddr_unaligned));
  1045. if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
  1046. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1047. FL("Link descriptor memory alloc failed"));
  1048. goto fail;
  1049. }
  1050. }
  1051. if (last_bank_size) {
1052. /* Allocate the last bank in case the total memory required is not an
1053. * exact multiple of max_alloc_size
1054. */
  1055. soc->link_desc_banks[i].base_vaddr_unaligned =
  1056. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  1057. last_bank_size,
  1058. &(soc->link_desc_banks[i].base_paddr_unaligned));
  1059. soc->link_desc_banks[i].size = last_bank_size;
  1060. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
  1061. (soc->link_desc_banks[i].base_vaddr_unaligned) +
  1062. ((unsigned long)(
  1063. soc->link_desc_banks[i].base_vaddr_unaligned) %
  1064. link_desc_align));
  1065. soc->link_desc_banks[i].base_paddr =
  1066. (unsigned long)(
  1067. soc->link_desc_banks[i].base_paddr_unaligned) +
  1068. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  1069. (unsigned long)(
  1070. soc->link_desc_banks[i].base_vaddr_unaligned));
  1071. }
  1072. /* Allocate and setup link descriptor idle list for HW internal use */
  1073. entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
  1074. total_mem_size = entry_size * total_link_descs;
  1075. if (total_mem_size <= max_alloc_size) {
  1076. void *desc;
  1077. if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
  1078. WBM_IDLE_LINK, 0, 0, total_link_descs)) {
  1079. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1080. FL("Link desc idle ring setup failed"));
  1081. goto fail;
  1082. }
  1083. hal_srng_access_start_unlocked(soc->hal_soc,
  1084. soc->wbm_idle_link_ring.hal_srng);
  1085. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1086. soc->link_desc_banks[i].base_paddr; i++) {
  1087. uint32_t num_entries = (soc->link_desc_banks[i].size -
  1088. ((unsigned long)(
  1089. soc->link_desc_banks[i].base_vaddr) -
  1090. (unsigned long)(
  1091. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1092. / link_desc_size;
  1093. unsigned long paddr = (unsigned long)(
  1094. soc->link_desc_banks[i].base_paddr);
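/*
 * Each WBM idle-list entry records a link descriptor's physical address
 * together with a cookie; LINK_DESC_COOKIE() is assumed to pack the
 * running desc_id and the bank index so the SW descriptor can be located
 * again when HW returns it.
 */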
  1095. while (num_entries && (desc = hal_srng_src_get_next(
  1096. soc->hal_soc,
  1097. soc->wbm_idle_link_ring.hal_srng))) {
  1098. hal_set_link_desc_addr(desc,
  1099. LINK_DESC_COOKIE(desc_id, i), paddr);
  1100. num_entries--;
  1101. desc_id++;
  1102. paddr += link_desc_size;
  1103. }
  1104. }
  1105. hal_srng_access_end_unlocked(soc->hal_soc,
  1106. soc->wbm_idle_link_ring.hal_srng);
  1107. } else {
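/*
 * The idle list is too large for a single SRNG allocation, so it is
 * described to HW through a chain of scatter buffers instead: each
 * scatter buffer holds a run of WBM_IDLE_LINK entries, and the whole
 * chain is handed to HW via hal_setup_link_idle_list() below.
 */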
  1108. uint32_t num_scatter_bufs;
  1109. uint32_t num_entries_per_buf;
  1110. uint32_t rem_entries;
  1111. uint8_t *scatter_buf_ptr;
  1112. uint16_t scatter_buf_num;
  1113. soc->wbm_idle_scatter_buf_size =
  1114. hal_idle_list_scatter_buf_size(soc->hal_soc);
  1115. num_entries_per_buf = hal_idle_scatter_buf_num_entries(
  1116. soc->hal_soc, soc->wbm_idle_scatter_buf_size);
  1117. num_scatter_bufs = hal_idle_list_num_scatter_bufs(
  1118. soc->hal_soc, total_mem_size,
  1119. soc->wbm_idle_scatter_buf_size);
  1120. for (i = 0; i < num_scatter_bufs; i++) {
  1121. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  1122. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  1123. soc->wbm_idle_scatter_buf_size,
  1124. &(soc->wbm_idle_scatter_buf_base_paddr[i]));
  1125. if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
  1126. QDF_TRACE(QDF_MODULE_ID_DP,
  1127. QDF_TRACE_LEVEL_ERROR,
  1128. FL("Scatter list memory alloc failed"));
  1129. goto fail;
  1130. }
  1131. }
  1132. /* Populate idle list scatter buffers with link descriptor
  1133. * pointers
  1134. */
  1135. scatter_buf_num = 0;
  1136. scatter_buf_ptr = (uint8_t *)(
  1137. soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
  1138. rem_entries = num_entries_per_buf;
  1139. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  1140. soc->link_desc_banks[i].base_paddr; i++) {
  1141. uint32_t num_link_descs =
  1142. (soc->link_desc_banks[i].size -
  1143. ((unsigned long)(
  1144. soc->link_desc_banks[i].base_vaddr) -
  1145. (unsigned long)(
  1146. soc->link_desc_banks[i].base_vaddr_unaligned)))
  1147. / link_desc_size;
  1148. unsigned long paddr = (unsigned long)(
  1149. soc->link_desc_banks[i].base_paddr);
  1150. while (num_link_descs) {
  1151. hal_set_link_desc_addr((void *)scatter_buf_ptr,
  1152. LINK_DESC_COOKIE(desc_id, i), paddr);
  1153. num_link_descs--;
  1154. desc_id++;
  1155. paddr += link_desc_size;
  1156. rem_entries--;
  1157. if (rem_entries) {
  1158. scatter_buf_ptr += entry_size;
  1159. } else {
  1160. rem_entries = num_entries_per_buf;
  1161. scatter_buf_num++;
  1162. if (scatter_buf_num >= num_scatter_bufs)
  1163. break;
  1164. scatter_buf_ptr = (uint8_t *)(
  1165. soc->wbm_idle_scatter_buf_base_vaddr[
  1166. scatter_buf_num]);
  1167. }
  1168. }
  1169. }
  1170. /* Setup link descriptor idle list in HW */
  1171. hal_setup_link_idle_list(soc->hal_soc,
  1172. soc->wbm_idle_scatter_buf_base_paddr,
  1173. soc->wbm_idle_scatter_buf_base_vaddr,
  1174. num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
  1175. (uint32_t)(scatter_buf_ptr -
  1176. (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
  1177. scatter_buf_num-1])), total_link_descs);
  1178. }
  1179. return 0;
  1180. fail:
  1181. if (soc->wbm_idle_link_ring.hal_srng) {
1182. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1183. WBM_IDLE_LINK, 0);
  1184. }
  1185. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1186. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1187. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1188. soc->wbm_idle_scatter_buf_size,
  1189. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1190. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1191. }
  1192. }
  1193. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1194. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1195. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1196. soc->link_desc_banks[i].size,
  1197. soc->link_desc_banks[i].base_vaddr_unaligned,
  1198. soc->link_desc_banks[i].base_paddr_unaligned,
  1199. 0);
  1200. }
  1201. }
  1202. return QDF_STATUS_E_FAILURE;
  1203. }
  1204. /*
1205. * Free the link descriptor pool that was set up for HW use
  1206. */
  1207. static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
  1208. {
  1209. int i;
  1210. if (soc->wbm_idle_link_ring.hal_srng) {
  1211. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  1212. WBM_IDLE_LINK, 0);
  1213. }
  1214. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1215. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1216. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1217. soc->wbm_idle_scatter_buf_size,
  1218. soc->wbm_idle_scatter_buf_base_vaddr[i],
  1219. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  1220. }
  1221. }
  1222. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  1223. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  1224. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1225. soc->link_desc_banks[i].size,
  1226. soc->link_desc_banks[i].base_vaddr_unaligned,
  1227. soc->link_desc_banks[i].base_paddr_unaligned,
  1228. 0);
  1229. }
  1230. }
  1231. }
  1232. /* TODO: Following should be configurable */
  1233. #define WBM_RELEASE_RING_SIZE 64
  1234. #define TCL_CMD_RING_SIZE 32
  1235. #define TCL_STATUS_RING_SIZE 32
  1236. #if defined(QCA_WIFI_QCA6290)
  1237. #define REO_DST_RING_SIZE 1024
  1238. #else
  1239. #define REO_DST_RING_SIZE 2048
  1240. #endif
  1241. #define REO_REINJECT_RING_SIZE 32
  1242. #define RX_RELEASE_RING_SIZE 1024
  1243. #define REO_EXCEPTION_RING_SIZE 128
  1244. #define REO_CMD_RING_SIZE 32
  1245. #define REO_STATUS_RING_SIZE 32
  1246. #define RXDMA_BUF_RING_SIZE 1024
  1247. #define RXDMA_REFILL_RING_SIZE 4096
  1248. #define RXDMA_MONITOR_BUF_RING_SIZE 4096
  1249. #define RXDMA_MONITOR_DST_RING_SIZE 2048
  1250. #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
  1251. #define RXDMA_MONITOR_DESC_RING_SIZE 2048
  1252. #define RXDMA_ERR_DST_RING_SIZE 1024
  1253. /*
  1254. * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
  1255. * @soc: Datapath SOC handle
  1256. *
  1257. * This is a timer function used to age out stale WDS nodes from
  1258. * AST table
  1259. */
  1260. #ifdef FEATURE_WDS
  1261. static void dp_wds_aging_timer_fn(void *soc_hdl)
  1262. {
  1263. struct dp_soc *soc = (struct dp_soc *) soc_hdl;
  1264. struct dp_pdev *pdev;
  1265. struct dp_vdev *vdev;
  1266. struct dp_peer *peer;
  1267. struct dp_ast_entry *ase, *temp_ase;
  1268. int i;
  1269. qdf_spin_lock_bh(&soc->ast_lock);
  1270. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  1271. pdev = soc->pdev_list[i];
  1272. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  1273. DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
  1274. DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
  1275. /*
  1276. * Do not expire static ast entries
  1277. */
  1278. if (ase->type == CDP_TXRX_AST_TYPE_STATIC)
  1279. continue;
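/*
 * Two-pass aging: an entry that was active during the last interval is
 * only marked inactive here; it is aged out on the next timer pass if
 * no traffic re-marks it active in the meantime.
 */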
  1280. if (ase->is_active) {
  1281. ase->is_active = FALSE;
  1282. continue;
  1283. }
  1284. DP_STATS_INC(soc, ast.aged_out, 1);
  1285. soc->cdp_soc.ol_ops->peer_del_wds_entry(
  1286. vdev->osif_vdev,
  1287. ase->mac_addr.raw);
  1288. dp_peer_del_ast(soc, ase);
  1289. }
  1290. }
  1291. }
  1292. }
  1293. qdf_spin_unlock_bh(&soc->ast_lock);
  1294. if (qdf_atomic_read(&soc->cmn_init_done))
  1295. qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
  1296. }
  1297. /*
  1298. * dp_soc_wds_attach() - Setup WDS timer and AST table
  1299. * @soc: Datapath SOC handle
  1300. *
  1301. * Return: None
  1302. */
  1303. static void dp_soc_wds_attach(struct dp_soc *soc)
  1304. {
  1305. qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
  1306. dp_wds_aging_timer_fn, (void *)soc,
  1307. QDF_TIMER_TYPE_WAKE_APPS);
  1308. qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
  1309. }
  1310. /*
  1311. * dp_soc_wds_detach() - Detach WDS data structures and timers
  1312. * @txrx_soc: DP SOC handle
  1313. *
  1314. * Return: None
  1315. */
  1316. static void dp_soc_wds_detach(struct dp_soc *soc)
  1317. {
  1318. qdf_timer_stop(&soc->wds_aging_timer);
  1319. qdf_timer_free(&soc->wds_aging_timer);
  1320. }
  1321. #else
  1322. static void dp_soc_wds_attach(struct dp_soc *soc)
  1323. {
  1324. }
  1325. static void dp_soc_wds_detach(struct dp_soc *soc)
  1326. {
  1327. }
  1328. #endif
  1329. /*
1330. * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1331. * @soc: Datapath soc handle
1332. *
1333. * This API re-programs the Tx CPU ring map based on the NSS offload configuration
  1334. */
  1335. static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
  1336. {
  1337. uint8_t i;
  1338. int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1339. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  1340. if (nss_config == 1) {
  1341. /*
  1342. * Setting Tx ring map for one nss offloaded radio
  1343. */
  1344. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
  1345. } else if (nss_config == 2) {
  1346. /*
  1347. * Setting Tx ring for two nss offloaded radios
  1348. */
  1349. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
  1350. } else {
  1351. /*
  1352. * Setting Tx ring map for all nss offloaded radios
  1353. */
  1354. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
  1355. }
  1356. }
  1357. }
  1358. /*
  1359. * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1360. * @soc: DP SOC handle
1361. * @ring_type: ring type
1362. * @ring_num: ring number
1363. *
1364. * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
  1365. */
  1366. static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
  1367. {
  1368. uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1369. uint8_t status = 0;
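/*
 * nss_config is a per-radio offload bitmap; for the ring types below,
 * ring_num is taken to identify the radio, so a set bit at ring_num
 * means that radio's ring is serviced by the NSS offload path.
 */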
  1370. switch (ring_type) {
  1371. case WBM2SW_RELEASE:
  1372. case REO_DST:
  1373. case RXDMA_BUF:
  1374. status = ((nss_config) & (1 << ring_num));
  1375. break;
  1376. default:
  1377. break;
  1378. }
  1379. return status;
  1380. }
  1381. /*
  1382. * dp_soc_reset_intr_mask() - reset interrupt mask
1383. * @soc: DP SOC handle
1384. *
1385. * Return: void
  1386. */
  1387. static void dp_soc_reset_intr_mask(struct dp_soc *soc)
  1388. {
  1389. uint8_t j;
  1390. int *grp_mask = NULL;
  1391. int group_number, mask, num_ring;
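/*
 * For every ring that NSS has taken over, clear its bit in whichever
 * interrupt group currently claims that ring, so the host stops taking
 * interrupts for (and servicing) the offloaded rings.
 */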
  1392. /* number of tx ring */
  1393. num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  1394. /*
  1395. * group mask for tx completion ring.
  1396. */
  1397. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  1398. /* loop and reset the mask for only offloaded ring */
  1399. for (j = 0; j < num_ring; j++) {
  1400. if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
  1401. continue;
  1402. }
  1403. /*
  1404. * Group number corresponding to tx offloaded ring.
  1405. */
  1406. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  1407. if (group_number < 0) {
  1408. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1409. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  1410. WBM2SW_RELEASE, j);
  1411. return;
  1412. }
  1413. /* reset the tx mask for offloaded ring */
  1414. mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
  1415. mask &= (~(1 << j));
  1416. /*
  1417. * reset the interrupt mask for offloaded ring.
  1418. */
  1419. wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  1420. }
  1421. /* number of rx rings */
  1422. num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  1423. /*
  1424. * group mask for reo destination ring.
  1425. */
  1426. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  1427. /* loop and reset the mask for only offloaded ring */
  1428. for (j = 0; j < num_ring; j++) {
  1429. if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
  1430. continue;
  1431. }
  1432. /*
  1433. * Group number corresponding to rx offloaded ring.
  1434. */
  1435. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  1436. if (group_number < 0) {
  1437. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1438. FL("ring not part of any group; ring_type: %d,ring_num %d"),
  1439. REO_DST, j);
  1440. return;
  1441. }
1442. /* clear the rx ring mask bit for the offloaded ring */
  1443. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
  1444. mask &= (~(1 << j));
  1445. /*
  1446. * set the interrupt mask to zero for rx offloaded radio.
  1447. */
  1448. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  1449. }
  1450. /*
  1451. * group mask for Rx buffer refill ring
  1452. */
  1453. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  1454. /* loop and reset the mask for only offloaded ring */
  1455. for (j = 0; j < MAX_PDEV_CNT; j++) {
  1456. if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
  1457. continue;
  1458. }
  1459. /*
  1460. * Group number corresponding to rx offloaded ring.
  1461. */
  1462. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  1463. if (group_number < 0) {
  1464. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1465. FL("ring not part of any group; ring_type: %d,ring_num %d"),
1466. RXDMA_BUF, j);
  1467. return;
  1468. }
1469. /* clear the host2rxdma ring mask bit for the offloaded ring */
  1470. mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  1471. group_number);
  1472. mask &= (~(1 << j));
  1473. /*
  1474. * set the interrupt mask to zero for rx offloaded radio.
  1475. */
  1476. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  1477. group_number, mask);
  1478. }
  1479. }
  1480. #ifdef IPA_OFFLOAD
  1481. /**
1482. * dp_reo_remap_config() - configure reo remap register value based
1483. * on nss configuration.
1484. * Based on the offload_radio value below, the corresponding remap
1485. * configuration is applied:
  1486. * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
  1487. * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
  1488. * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
  1489. * 3 - both Radios handled by NSS (remap not required)
  1490. * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
  1491. *
  1492. * @remap1: output parameter indicates reo remap 1 register value
  1493. * @remap2: output parameter indicates reo remap 2 register value
  1494. * Return: bool type, true if remap is configured else false.
  1495. */
  1496. static bool dp_reo_remap_config(struct dp_soc *soc,
  1497. uint32_t *remap1,
  1498. uint32_t *remap2)
  1499. {
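/*
 * Each REO remap register packs eight 3-bit destination-ring fields
 * starting at bit 8; the pattern below round-robins RX across REO rings
 * 1-3, presumably leaving ring 4 for the IPA offload path.
 */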
  1500. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
  1501. (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
  1502. *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
  1503. (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
  1504. return true;
  1505. }
  1506. #else
  1507. static bool dp_reo_remap_config(struct dp_soc *soc,
  1508. uint32_t *remap1,
  1509. uint32_t *remap2)
  1510. {
  1511. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1512. switch (offload_radio) {
  1513. case 0:
  1514. *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  1515. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  1516. (0x3 << 18) | (0x4 << 21)) << 8;
  1517. *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
  1518. (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
  1519. (0x3 << 18) | (0x4 << 21)) << 8;
  1520. break;
  1521. case 1:
  1522. *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
  1523. (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
  1524. (0x2 << 18) | (0x3 << 21)) << 8;
  1525. *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
  1526. (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
  1527. (0x4 << 18) | (0x2 << 21)) << 8;
  1528. break;
  1529. case 2:
  1530. *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
  1531. (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
  1532. (0x1 << 18) | (0x3 << 21)) << 8;
  1533. *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
  1534. (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
  1535. (0x4 << 18) | (0x1 << 21)) << 8;
  1536. break;
  1537. case 3:
  1538. /* return false if both radios are offloaded to NSS */
  1539. return false;
  1540. }
  1541. return true;
  1542. }
  1543. #endif
  1544. /*
1545. * dp_soc_cmn_setup() - Common SoC level initialization
  1546. * @soc: Datapath SOC handle
  1547. *
  1548. * This is an internal function used to setup common SOC data structures,
  1549. * to be called from PDEV attach after receiving HW mode capabilities from FW
  1550. */
  1551. static int dp_soc_cmn_setup(struct dp_soc *soc)
  1552. {
  1553. int i;
  1554. struct hal_reo_params reo_params;
  1555. int tx_ring_size;
  1556. int tx_comp_ring_size;
  1557. if (qdf_atomic_read(&soc->cmn_init_done))
  1558. return 0;
  1559. if (dp_peer_find_attach(soc))
  1560. goto fail0;
  1561. if (dp_hw_link_desc_pool_setup(soc))
  1562. goto fail1;
  1563. /* Setup SRNG rings */
  1564. /* Common rings */
  1565. if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
  1566. WBM_RELEASE_RING_SIZE)) {
  1567. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1568. FL("dp_srng_setup failed for wbm_desc_rel_ring"));
  1569. goto fail1;
  1570. }
  1571. soc->num_tcl_data_rings = 0;
  1572. /* Tx data rings */
  1573. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  1574. soc->num_tcl_data_rings =
  1575. wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  1576. tx_comp_ring_size =
  1577. wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
  1578. tx_ring_size =
  1579. wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
  1580. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  1581. if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
  1582. TCL_DATA, i, 0, tx_ring_size)) {
  1583. QDF_TRACE(QDF_MODULE_ID_DP,
  1584. QDF_TRACE_LEVEL_ERROR,
  1585. FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
  1586. goto fail1;
  1587. }
  1588. /*
  1589. * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
  1590. * count
  1591. */
  1592. if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
  1593. WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
  1594. QDF_TRACE(QDF_MODULE_ID_DP,
  1595. QDF_TRACE_LEVEL_ERROR,
  1596. FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
  1597. goto fail1;
  1598. }
  1599. }
  1600. } else {
  1601. /* This will be incremented during per pdev ring setup */
  1602. soc->num_tcl_data_rings = 0;
  1603. }
  1604. if (dp_tx_soc_attach(soc)) {
  1605. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1606. FL("dp_tx_soc_attach failed"));
  1607. goto fail1;
  1608. }
  1609. /* TCL command and status rings */
  1610. if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
  1611. TCL_CMD_RING_SIZE)) {
  1612. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1613. FL("dp_srng_setup failed for tcl_cmd_ring"));
  1614. goto fail1;
  1615. }
  1616. if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
  1617. TCL_STATUS_RING_SIZE)) {
  1618. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1619. FL("dp_srng_setup failed for tcl_status_ring"));
  1620. goto fail1;
  1621. }
  1622. /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
  1623. * descriptors
  1624. */
  1625. /* Rx data rings */
  1626. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  1627. soc->num_reo_dest_rings =
  1628. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  1629. QDF_TRACE(QDF_MODULE_ID_DP,
  1630. QDF_TRACE_LEVEL_ERROR,
  1631. FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
  1632. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  1633. if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
  1634. i, 0, REO_DST_RING_SIZE)) {
  1635. QDF_TRACE(QDF_MODULE_ID_DP,
  1636. QDF_TRACE_LEVEL_ERROR,
  1637. FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
  1638. goto fail1;
  1639. }
  1640. }
  1641. } else {
  1642. /* This will be incremented during per pdev ring setup */
  1643. soc->num_reo_dest_rings = 0;
  1644. }
  1645. /* LMAC RxDMA to SW Rings configuration */
  1646. if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
  1647. /* Only valid for MCL */
  1648. struct dp_pdev *pdev = soc->pdev_list[0];
  1649. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
  1650. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
  1651. RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
  1652. QDF_TRACE(QDF_MODULE_ID_DP,
  1653. QDF_TRACE_LEVEL_ERROR,
  1654. FL("dp_srng_setup failed for rxdma_err_dst_ring"));
  1655. goto fail1;
  1656. }
  1657. }
  1658. }
  1659. /* TBD: call dp_rx_init to setup Rx SW descriptors */
  1660. /* REO reinjection ring */
  1661. if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
  1662. REO_REINJECT_RING_SIZE)) {
  1663. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1664. FL("dp_srng_setup failed for reo_reinject_ring"));
  1665. goto fail1;
  1666. }
  1667. /* Rx release ring */
  1668. if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
  1669. RX_RELEASE_RING_SIZE)) {
  1670. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1671. FL("dp_srng_setup failed for rx_rel_ring"));
  1672. goto fail1;
  1673. }
  1674. /* Rx exception ring */
  1675. if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
  1676. MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
  1677. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1678. FL("dp_srng_setup failed for reo_exception_ring"));
  1679. goto fail1;
  1680. }
  1681. /* REO command and status rings */
  1682. if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
  1683. REO_CMD_RING_SIZE)) {
  1684. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1685. FL("dp_srng_setup failed for reo_cmd_ring"));
  1686. goto fail1;
  1687. }
  1688. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  1689. TAILQ_INIT(&soc->rx.reo_cmd_list);
  1690. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  1691. if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
  1692. REO_STATUS_RING_SIZE)) {
  1693. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1694. FL("dp_srng_setup failed for reo_status_ring"));
  1695. goto fail1;
  1696. }
  1697. qdf_spinlock_create(&soc->ast_lock);
  1698. dp_soc_wds_attach(soc);
  1699. /* Reset the cpu ring map if radio is NSS offloaded */
  1700. if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
  1701. dp_soc_reset_cpu_ring_map(soc);
  1702. dp_soc_reset_intr_mask(soc);
  1703. }
  1704. /* Setup HW REO */
  1705. qdf_mem_zero(&reo_params, sizeof(reo_params));
  1706. if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  1707. /*
  1708. * Reo ring remap is not required if both radios
  1709. * are offloaded to NSS
  1710. */
  1711. if (!dp_reo_remap_config(soc,
  1712. &reo_params.remap1,
  1713. &reo_params.remap2))
  1714. goto out;
  1715. reo_params.rx_hash_enabled = true;
  1716. }
  1717. out:
  1718. hal_reo_setup(soc->hal_soc, &reo_params);
  1719. qdf_atomic_set(&soc->cmn_init_done, 1);
  1720. qdf_nbuf_queue_init(&soc->htt_stats.msg);
  1721. return 0;
  1722. fail1:
  1723. /*
  1724. * Cleanup will be done as part of soc_detach, which will
  1725. * be called on pdev attach failure
  1726. */
  1727. fail0:
  1728. return QDF_STATUS_E_FAILURE;
  1729. }
  1730. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
  1731. static void dp_lro_hash_setup(struct dp_soc *soc)
  1732. {
  1733. struct cdp_lro_hash_config lro_hash;
  1734. if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  1735. !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  1736. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1737. FL("LRO disabled RX hash disabled"));
  1738. return;
  1739. }
  1740. qdf_mem_zero(&lro_hash, sizeof(lro_hash));
  1741. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
  1742. lro_hash.lro_enable = 1;
  1743. lro_hash.tcp_flag = QDF_TCPHDR_ACK;
  1744. lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
  1745. QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
  1746. QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
  1747. }
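/*
 * The Toeplitz hash seeds are randomized on each attach so RX flow
 * hashing (and hence REO destination selection / LRO flow lookup) is not
 * predictable across runs; they are pushed to the target through the
 * lro_hash_config callback below.
 */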
  1748. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
  1749. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
  1750. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  1751. LRO_IPV4_SEED_ARR_SZ));
  1752. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
  1753. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  1754. LRO_IPV6_SEED_ARR_SZ));
  1755. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  1756. "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
  1757. lro_hash.lro_enable, lro_hash.tcp_flag,
  1758. lro_hash.tcp_flag_mask);
  1759. qdf_trace_hex_dump(QDF_MODULE_ID_DP,
  1760. QDF_TRACE_LEVEL_ERROR,
  1761. (void *)lro_hash.toeplitz_hash_ipv4,
  1762. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  1763. LRO_IPV4_SEED_ARR_SZ));
  1764. qdf_trace_hex_dump(QDF_MODULE_ID_DP,
  1765. QDF_TRACE_LEVEL_ERROR,
  1766. (void *)lro_hash.toeplitz_hash_ipv6,
  1767. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  1768. LRO_IPV6_SEED_ARR_SZ));
  1769. qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
  1770. if (soc->cdp_soc.ol_ops->lro_hash_config)
  1771. (void)soc->cdp_soc.ol_ops->lro_hash_config
  1772. (soc->osif_soc, &lro_hash);
  1773. }
  1774. /*
  1775. * dp_rxdma_ring_setup() - configure the RX DMA rings
  1776. * @soc: data path SoC handle
  1777. * @pdev: Physical device handle
  1778. *
  1779. * Return: 0 - success, > 0 - failure
  1780. */
  1781. #ifdef QCA_HOST2FW_RXBUF_RING
  1782. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  1783. struct dp_pdev *pdev)
  1784. {
  1785. int max_mac_rings =
  1786. wlan_cfg_get_num_mac_rings
  1787. (pdev->wlan_cfg_ctx);
  1788. int i;
  1789. for (i = 0; i < max_mac_rings; i++) {
  1790. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1791. "%s: pdev_id %d mac_id %d\n",
  1792. __func__, pdev->pdev_id, i);
  1793. if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
  1794. RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
  1795. QDF_TRACE(QDF_MODULE_ID_DP,
  1796. QDF_TRACE_LEVEL_ERROR,
  1797. FL("failed rx mac ring setup"));
  1798. return QDF_STATUS_E_FAILURE;
  1799. }
  1800. }
  1801. return QDF_STATUS_SUCCESS;
  1802. }
  1803. #else
  1804. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  1805. struct dp_pdev *pdev)
  1806. {
  1807. return QDF_STATUS_SUCCESS;
  1808. }
  1809. #endif
  1810. /**
  1811. * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
  1812. * @pdev - DP_PDEV handle
  1813. *
  1814. * Return: void
  1815. */
  1816. static inline void
  1817. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  1818. {
  1819. uint8_t map_id;
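/*
 * Seed every per-pdev DSCP-to-TID map with the default table, then
 * program the maps that have HW backing into the TCL DSCP-TID registers.
 */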
  1820. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  1821. qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
  1822. sizeof(default_dscp_tid_map));
  1823. }
  1824. for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
  1825. hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
  1826. pdev->dscp_tid_map[map_id],
  1827. map_id);
  1828. }
  1829. }
  1830. /*
  1831. * dp_pdev_attach_wifi3() - attach txrx pdev
1832. * @txrx_soc: Datapath SOC handle
1833. * @ctrl_pdev: Opaque PDEV handle from OSIF/HDD
  1834. * @htc_handle: HTC handle for host-target interface
  1835. * @qdf_osdev: QDF OS device
  1836. * @pdev_id: PDEV ID
  1837. *
  1838. * Return: DP PDEV handle on success, NULL on failure
  1839. */
  1840. static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
  1841. struct cdp_cfg *ctrl_pdev,
  1842. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
  1843. {
  1844. int tx_ring_size;
  1845. int tx_comp_ring_size;
  1846. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1847. struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
  1848. if (!pdev) {
  1849. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1850. FL("DP PDEV memory allocation failed"));
  1851. goto fail0;
  1852. }
  1853. pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
  1854. if (!pdev->wlan_cfg_ctx) {
  1855. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1856. FL("pdev cfg_attach failed"));
  1857. qdf_mem_free(pdev);
  1858. goto fail0;
  1859. }
  1860. /*
  1861. * set nss pdev config based on soc config
  1862. */
  1863. wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
  1864. (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id)));
  1865. pdev->soc = soc;
  1866. pdev->osif_pdev = ctrl_pdev;
  1867. pdev->pdev_id = pdev_id;
  1868. soc->pdev_list[pdev_id] = pdev;
  1869. soc->pdev_count++;
  1870. TAILQ_INIT(&pdev->vdev_list);
  1871. pdev->vdev_count = 0;
  1872. qdf_spinlock_create(&pdev->tx_mutex);
  1873. qdf_spinlock_create(&pdev->neighbour_peer_mutex);
  1874. TAILQ_INIT(&pdev->neighbour_peers_list);
  1875. if (dp_soc_cmn_setup(soc)) {
  1876. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1877. FL("dp_soc_cmn_setup failed"));
  1878. goto fail1;
  1879. }
  1880. /* Setup per PDEV TCL rings if configured */
  1881. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  1882. tx_ring_size =
  1883. wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
  1884. tx_comp_ring_size =
  1885. wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
  1886. if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
  1887. pdev_id, pdev_id, tx_ring_size)) {
  1888. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1889. FL("dp_srng_setup failed for tcl_data_ring"));
  1890. goto fail1;
  1891. }
  1892. if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
  1893. WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
  1894. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1895. FL("dp_srng_setup failed for tx_comp_ring"));
  1896. goto fail1;
  1897. }
  1898. soc->num_tcl_data_rings++;
  1899. }
  1900. /* Tx specific init */
  1901. if (dp_tx_pdev_attach(pdev)) {
  1902. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1903. FL("dp_tx_pdev_attach failed"));
  1904. goto fail1;
  1905. }
  1906. /* Setup per PDEV REO rings if configured */
  1907. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  1908. if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
  1909. pdev_id, pdev_id, REO_DST_RING_SIZE)) {
  1910. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1911. FL("dp_srng_setup failed for reo_dest_ringn"));
  1912. goto fail1;
  1913. }
  1914. soc->num_reo_dest_rings++;
  1915. }
  1916. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
  1917. RXDMA_REFILL_RING_SIZE)) {
  1918. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1919. FL("dp_srng_setup failed rx refill ring"));
  1920. goto fail1;
  1921. }
  1922. if (dp_rxdma_ring_setup(soc, pdev)) {
  1923. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1924. FL("RXDMA ring config failed"));
  1925. goto fail1;
  1926. }
  1927. if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
  1928. pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
  1929. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1930. FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
  1931. goto fail1;
  1932. }
  1933. if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
  1934. pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
  1935. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1936. FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
  1937. goto fail1;
  1938. }
  1939. if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
  1940. RXDMA_MONITOR_STATUS, 0, pdev_id,
  1941. RXDMA_MONITOR_STATUS_RING_SIZE)) {
  1942. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1943. FL("dp_srng_setup failed for rxdma_mon_status_ring"));
  1944. goto fail1;
  1945. }
  1946. if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring,
  1947. RXDMA_MONITOR_DESC, 0, pdev_id, RXDMA_MONITOR_DESC_RING_SIZE)) {
  1948. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1949. "dp_srng_setup failed for rxdma_mon_desc_ring\n");
  1950. goto fail1;
  1951. }
  1952. if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
  1953. if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
  1954. 0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) {
  1955. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1956. FL("dp_srng_setup failed for rxdma_err_dst_ring"));
  1957. goto fail1;
  1958. }
  1959. }
  1960. /* Setup second Rx refill buffer ring */
  1961. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 2,
  1962. pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
  1963. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1964. FL("dp_srng_setup failed second rx refill ring"));
  1965. goto fail1;
  1966. }
  1967. if (dp_ipa_ring_resource_setup(soc, pdev))
  1968. goto fail1;
  1969. if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
  1970. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1971. FL("dp_ipa_uc_attach failed"));
  1972. goto fail1;
  1973. }
  1974. /* Rx specific init */
  1975. if (dp_rx_pdev_attach(pdev)) {
  1976. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1977. FL("dp_rx_pdev_attach failed"));
  1978. goto fail0;
  1979. }
  1980. DP_STATS_INIT(pdev);
  1981. /* Monitor filter init */
  1982. pdev->mon_filter_mode = MON_FILTER_ALL;
  1983. pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
  1984. pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
  1985. pdev->fp_data_filter = FILTER_DATA_ALL;
  1986. pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
  1987. pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
  1988. pdev->mo_data_filter = FILTER_DATA_ALL;
  1989. #ifndef CONFIG_WIN
  1990. /* MCL */
  1991. dp_local_peer_id_pool_init(pdev);
  1992. #endif
  1993. dp_dscp_tid_map_setup(pdev);
  1994. /* Rx monitor mode specific init */
  1995. if (dp_rx_pdev_mon_attach(pdev)) {
  1996. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1997. "dp_rx_pdev_attach failed\n");
  1998. goto fail1;
  1999. }
  2000. if (dp_wdi_event_attach(pdev)) {
  2001. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2002. "dp_wdi_evet_attach failed\n");
  2003. goto fail1;
  2004. }
  2005. /* set the reo destination during initialization */
  2006. pdev->reo_dest = pdev->pdev_id + 1;
  2007. return (struct cdp_pdev *)pdev;
  2008. fail1:
  2009. dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
  2010. fail0:
  2011. return NULL;
  2012. }
  2013. /*
2014. * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
  2015. * @soc: data path SoC handle
  2016. * @pdev: Physical device handle
  2017. *
  2018. * Return: void
  2019. */
  2020. #ifdef QCA_HOST2FW_RXBUF_RING
  2021. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2022. struct dp_pdev *pdev)
  2023. {
  2024. int max_mac_rings =
  2025. wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
  2026. int i;
  2027. max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
  2028. max_mac_rings : MAX_RX_MAC_RINGS;
2029. for (i = 0; i < max_mac_rings; i++)
  2030. dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
  2031. RXDMA_BUF, 1);
  2032. qdf_timer_free(&soc->mon_reap_timer);
  2033. }
  2034. #else
  2035. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  2036. struct dp_pdev *pdev)
  2037. {
  2038. }
  2039. #endif
  2040. /*
  2041. * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
  2042. * @pdev: device object
  2043. *
  2044. * Return: void
  2045. */
  2046. static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  2047. {
  2048. struct dp_neighbour_peer *peer = NULL;
  2049. struct dp_neighbour_peer *temp_peer = NULL;
  2050. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  2051. neighbour_peer_list_elem, temp_peer) {
  2052. /* delete this peer from the list */
  2053. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  2054. peer, neighbour_peer_list_elem);
  2055. qdf_mem_free(peer);
  2056. }
  2057. qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
  2058. }
  2059. /*
  2060. * dp_pdev_detach_wifi3() - detach txrx pdev
  2061. * @txrx_pdev: Datapath PDEV handle
  2062. * @force: Force detach
  2063. *
  2064. */
  2065. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
  2066. {
  2067. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  2068. struct dp_soc *soc = pdev->soc;
  2069. qdf_nbuf_t curr_nbuf, next_nbuf;
  2070. dp_wdi_event_detach(pdev);
  2071. dp_tx_pdev_detach(pdev);
  2072. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  2073. dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
  2074. TCL_DATA, pdev->pdev_id);
  2075. dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
  2076. WBM2SW_RELEASE, pdev->pdev_id);
  2077. }
  2078. dp_pktlogmod_exit(pdev);
  2079. dp_rx_pdev_detach(pdev);
  2080. dp_rx_pdev_mon_detach(pdev);
  2081. dp_neighbour_peers_detach(pdev);
  2082. qdf_spinlock_destroy(&pdev->tx_mutex);
  2083. dp_ipa_uc_detach(soc, pdev);
  2084. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 2);
  2085. /* Cleanup per PDEV REO rings if configured */
  2086. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  2087. dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
  2088. REO_DST, pdev->pdev_id);
  2089. }
  2090. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  2091. dp_rxdma_ring_cleanup(soc, pdev);
  2092. dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);
  2093. dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);
  2094. dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
  2095. RXDMA_MONITOR_STATUS, 0);
  2096. dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring,
  2097. RXDMA_MONITOR_DESC, 0);
  2098. if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
  2099. dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST, 0);
  2100. } else {
  2101. int i;
  2102. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  2103. dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[i],
  2104. RXDMA_DST, 0);
  2105. }
  2106. curr_nbuf = pdev->invalid_peer_head_msdu;
  2107. while (curr_nbuf) {
  2108. next_nbuf = qdf_nbuf_next(curr_nbuf);
  2109. qdf_nbuf_free(curr_nbuf);
  2110. curr_nbuf = next_nbuf;
  2111. }
  2112. soc->pdev_list[pdev->pdev_id] = NULL;
  2113. soc->pdev_count--;
  2114. wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
  2115. qdf_mem_free(pdev);
  2116. }
  2117. /*
  2118. * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  2119. * @soc: DP SOC handle
  2120. */
  2121. static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  2122. {
  2123. struct reo_desc_list_node *desc;
  2124. struct dp_rx_tid *rx_tid;
  2125. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  2126. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  2127. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  2128. rx_tid = &desc->rx_tid;
  2129. qdf_mem_unmap_nbytes_single(soc->osdev,
  2130. rx_tid->hw_qdesc_paddr,
  2131. QDF_DMA_BIDIRECTIONAL,
  2132. rx_tid->hw_qdesc_alloc_size);
  2133. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  2134. qdf_mem_free(desc);
  2135. }
  2136. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  2137. qdf_list_destroy(&soc->reo_desc_freelist);
  2138. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  2139. }
  2140. /*
  2141. * dp_soc_detach_wifi3() - Detach txrx SOC
  2142. * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
  2143. */
  2144. static void dp_soc_detach_wifi3(void *txrx_soc)
  2145. {
  2146. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  2147. int i;
  2148. qdf_atomic_set(&soc->cmn_init_done, 0);
  2149. qdf_flush_work(&soc->htt_stats.work);
  2150. qdf_disable_work(&soc->htt_stats.work);
  2151. /* Free pending htt stats messages */
  2152. qdf_nbuf_queue_free(&soc->htt_stats.msg);
  2153. for (i = 0; i < MAX_PDEV_CNT; i++) {
  2154. if (soc->pdev_list[i])
  2155. dp_pdev_detach_wifi3(
  2156. (struct cdp_pdev *)soc->pdev_list[i], 1);
  2157. }
  2158. dp_peer_find_detach(soc);
  2159. /* TBD: Call Tx and Rx cleanup functions to free buffers and
  2160. * SW descriptors
  2161. */
  2162. /* Free the ring memories */
  2163. /* Common rings */
  2164. dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  2165. dp_tx_soc_detach(soc);
  2166. /* Tx data rings */
  2167. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  2168. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  2169. dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
  2170. TCL_DATA, i);
  2171. dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
  2172. WBM2SW_RELEASE, i);
  2173. }
  2174. }
  2175. /* TCL command and status rings */
  2176. dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  2177. dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  2178. /* Rx data rings */
  2179. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  2180. soc->num_reo_dest_rings =
  2181. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  2182. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  2183. /* TODO: Get number of rings and ring sizes
  2184. * from wlan_cfg
  2185. */
  2186. dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
  2187. REO_DST, i);
  2188. }
  2189. }
  2190. /* REO reinjection ring */
  2191. dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  2192. /* Rx release ring */
  2193. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  2194. /* Rx exception ring */
  2195. /* TODO: Better to store ring_type and ring_num in
  2196. * dp_srng during setup
  2197. */
  2198. dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  2199. /* REO command and status rings */
  2200. dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  2201. dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
  2202. dp_hw_link_desc_pool_cleanup(soc);
  2203. qdf_spinlock_destroy(&soc->peer_ref_mutex);
  2204. qdf_spinlock_destroy(&soc->htt_stats.lock);
  2205. htt_soc_detach(soc->htt_handle);
  2206. dp_reo_cmdlist_destroy(soc);
  2207. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  2208. dp_reo_desc_freelist_destroy(soc);
  2209. wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
  2210. dp_soc_wds_detach(soc);
  2211. qdf_spinlock_destroy(&soc->ast_lock);
  2212. qdf_mem_free(soc);
  2213. }
  2214. /*
  2215. * dp_rxdma_ring_config() - configure the RX DMA rings
  2216. *
  2217. * This function is used to configure the MAC rings.
2218. * On MCL, the host provides buffers through the Host2FW ring;
2219. * FW refills (copies) the buffers into the RXDMA ring and updates
2220. * ring_idx in a register.
  2221. *
  2222. * @soc: data path SoC handle
  2223. *
  2224. * Return: void
  2225. */
  2226. #ifdef QCA_HOST2FW_RXBUF_RING
  2227. static void dp_rxdma_ring_config(struct dp_soc *soc)
  2228. {
  2229. int i;
  2230. for (i = 0; i < MAX_PDEV_CNT; i++) {
  2231. struct dp_pdev *pdev = soc->pdev_list[i];
  2232. if (pdev) {
  2233. int mac_id = 0;
  2234. int j;
  2235. bool dbs_enable = 0;
  2236. int max_mac_rings =
  2237. wlan_cfg_get_num_mac_rings
  2238. (pdev->wlan_cfg_ctx);
  2239. htt_srng_setup(soc->htt_handle, 0,
  2240. pdev->rx_refill_buf_ring.hal_srng,
  2241. RXDMA_BUF);
  2242. if (pdev->rx_refill_buf_ring2.hal_srng)
  2243. htt_srng_setup(soc->htt_handle, 0,
  2244. pdev->rx_refill_buf_ring2.hal_srng,
  2245. RXDMA_BUF);
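/*
 * On DBS (dual-band simultaneous) capable HW every MAC gets its own
 * host2FW buffer ring and error-destination ring; otherwise
 * max_mac_rings is collapsed to 1 and only a single MAC is configured.
 */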
  2246. if (soc->cdp_soc.ol_ops->
  2247. is_hw_dbs_2x2_capable) {
  2248. dbs_enable = soc->cdp_soc.ol_ops->
  2249. is_hw_dbs_2x2_capable(soc->psoc);
  2250. }
  2251. if (dbs_enable) {
  2252. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2253. QDF_TRACE_LEVEL_ERROR,
  2254. FL("DBS enabled max_mac_rings %d\n"),
  2255. max_mac_rings);
  2256. } else {
  2257. max_mac_rings = 1;
  2258. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2259. QDF_TRACE_LEVEL_ERROR,
  2260. FL("DBS disabled, max_mac_rings %d\n"),
  2261. max_mac_rings);
  2262. }
  2263. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2264. FL("pdev_id %d max_mac_rings %d\n"),
  2265. pdev->pdev_id, max_mac_rings);
  2266. for (j = 0; j < max_mac_rings; j++) {
  2267. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2268. QDF_TRACE_LEVEL_ERROR,
  2269. FL("mac_id %d\n"), mac_id);
  2270. htt_srng_setup(soc->htt_handle, mac_id,
  2271. pdev->rx_mac_buf_ring[j]
  2272. .hal_srng,
  2273. RXDMA_BUF);
  2274. htt_srng_setup(soc->htt_handle, mac_id,
  2275. pdev->rxdma_err_dst_ring[j]
  2276. .hal_srng,
  2277. RXDMA_DST);
  2278. mac_id++;
  2279. }
  2280. /* Configure monitor mode rings */
  2281. htt_srng_setup(soc->htt_handle, i,
  2282. pdev->rxdma_mon_buf_ring.hal_srng,
  2283. RXDMA_MONITOR_BUF);
  2284. htt_srng_setup(soc->htt_handle, i,
  2285. pdev->rxdma_mon_dst_ring.hal_srng,
  2286. RXDMA_MONITOR_DST);
  2287. htt_srng_setup(soc->htt_handle, i,
  2288. pdev->rxdma_mon_status_ring.hal_srng,
  2289. RXDMA_MONITOR_STATUS);
  2290. htt_srng_setup(soc->htt_handle, i,
  2291. pdev->rxdma_mon_desc_ring.hal_srng,
  2292. RXDMA_MONITOR_DESC);
  2293. }
  2294. }
  2295. /*
  2296. * Timer to reap rxdma status rings.
  2297. * Needed until we enable ppdu end interrupts
  2298. */
  2299. qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
  2300. dp_service_mon_rings, (void *)soc,
  2301. QDF_TIMER_TYPE_WAKE_APPS);
  2302. soc->reap_timer_init = 1;
  2303. }
  2304. #else
  2305. static void dp_rxdma_ring_config(struct dp_soc *soc)
  2306. {
  2307. int i;
  2308. for (i = 0; i < MAX_PDEV_CNT; i++) {
  2309. struct dp_pdev *pdev = soc->pdev_list[i];
  2310. if (pdev) {
  2311. int ring_idx = dp_get_ring_id_for_mac_id(soc, i);
  2312. htt_srng_setup(soc->htt_handle, i,
  2313. pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
  2314. htt_srng_setup(soc->htt_handle, i,
  2315. pdev->rxdma_mon_buf_ring.hal_srng,
  2316. RXDMA_MONITOR_BUF);
  2317. htt_srng_setup(soc->htt_handle, i,
  2318. pdev->rxdma_mon_dst_ring.hal_srng,
  2319. RXDMA_MONITOR_DST);
  2320. htt_srng_setup(soc->htt_handle, i,
  2321. pdev->rxdma_mon_status_ring.hal_srng,
  2322. RXDMA_MONITOR_STATUS);
  2323. htt_srng_setup(soc->htt_handle, i,
  2324. pdev->rxdma_mon_desc_ring.hal_srng,
  2325. RXDMA_MONITOR_DESC);
  2326. htt_srng_setup(soc->htt_handle, i,
  2327. pdev->rxdma_err_dst_ring[ring_idx].hal_srng,
  2328. RXDMA_DST);
  2329. }
  2330. }
  2331. }
  2332. #endif
  2333. /*
  2334. * dp_soc_attach_target_wifi3() - SOC initialization in the target
  2335. * @txrx_soc: Datapath SOC handle
  2336. */
  2337. static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
  2338. {
  2339. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  2340. htt_soc_attach_target(soc->htt_handle);
  2341. dp_rxdma_ring_config(soc);
  2342. DP_STATS_INIT(soc);
  2343. /* initialize work queue for stats processing */
  2344. qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
  2345. return 0;
  2346. }
/*
 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
 * @cdp_soc: Datapath SOC handle
 *
 * Return: nss configuration
 */
  2351. static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
  2352. {
  2353. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  2354. return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
  2355. }
/*
 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
 * @cdp_soc: Datapath SOC handle
 * @config: nss config
 *
 * Return: void
 */
  2361. static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
  2362. {
  2363. struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
  2364. wlan_cfg_set_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx, config);
  2365. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2366. FL("nss-wifi<0> nss config is enabled"));
  2367. }
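/*
 * Illustrative sketch, not part of the driver: how the nss config getter and
 * setter above might be exercised together from within this file. The
 * DP_MAIN_USAGE_SKETCHES guard and the wrapper name are hypothetical and
 * exist only to keep the example out of the build.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static void dp_sketch_nss_cfg_roundtrip(struct dp_soc *soc, int nss_cfg)
{
	/* Program the SOC level nss offload configuration ... */
	dp_soc_set_nss_cfg_wifi3((struct cdp_soc_t *)soc, nss_cfg);

	/* ... and read it back; both calls operate on soc->wlan_cfg_ctx */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("nss cfg readback %d"),
		dp_soc_get_nss_cfg_wifi3((struct cdp_soc_t *)soc));
}
#endif /* DP_MAIN_USAGE_SKETCHES */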
/*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @txrx_pdev: Datapath PDEV handle
 * @vdev_mac_addr: MAC address of the virtual interface
 * @vdev_id: VDEV Id
 * @op_mode: VDEV operating mode
 *
 * Return: DP VDEV handle on success, NULL on failure
 */
  2377. static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
  2378. uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
  2379. {
  2380. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  2381. struct dp_soc *soc = pdev->soc;
  2382. struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
  2383. int tx_ring_size;
  2384. if (!vdev) {
  2385. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2386. FL("DP VDEV memory allocation failed"));
  2387. goto fail0;
  2388. }
  2389. vdev->pdev = pdev;
  2390. vdev->vdev_id = vdev_id;
  2391. vdev->opmode = op_mode;
  2392. vdev->osdev = soc->osdev;
  2393. vdev->osif_rx = NULL;
  2394. vdev->osif_rsim_rx_decap = NULL;
  2395. vdev->osif_get_key = NULL;
  2396. vdev->osif_rx_mon = NULL;
  2397. vdev->osif_tx_free_ext = NULL;
  2398. vdev->osif_vdev = NULL;
  2399. vdev->delete.pending = 0;
  2400. vdev->safemode = 0;
  2401. vdev->drop_unenc = 1;
  2402. vdev->sec_type = cdp_sec_type_none;
  2403. #ifdef notyet
  2404. vdev->filters_num = 0;
  2405. #endif
  2406. qdf_mem_copy(
  2407. &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
  2408. vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  2409. vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  2410. vdev->dscp_tid_map_id = 0;
  2411. vdev->mcast_enhancement_en = 0;
  2412. tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
  2413. /* TODO: Initialize default HTT meta data that will be used in
  2414. * TCL descriptors for packets transmitted from this VDEV
  2415. */
  2416. TAILQ_INIT(&vdev->peer_list);
  2417. /* add this vdev into the pdev's list */
  2418. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  2419. pdev->vdev_count++;
  2420. dp_tx_vdev_attach(vdev);
  2421. if (QDF_STATUS_SUCCESS != dp_tx_flow_pool_map_handler(pdev, vdev_id,
  2422. FLOW_TYPE_VDEV, vdev_id, tx_ring_size))
  2423. goto fail1;
  2424. if ((soc->intr_mode == DP_INTR_POLL) &&
  2425. wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
  2426. if (pdev->vdev_count == 1)
  2427. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  2428. }
  2429. dp_lro_hash_setup(soc);
  2430. /* LRO */
  2431. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  2432. wlan_op_mode_sta == vdev->opmode)
  2433. vdev->lro_enable = true;
  2434. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2435. "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
  2436. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2437. "Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
  2438. DP_STATS_INIT(vdev);
  2439. return (struct cdp_vdev *)vdev;
  2440. fail1:
  2441. dp_tx_vdev_detach(vdev);
  2442. qdf_mem_free(vdev);
  2443. fail0:
  2444. return NULL;
  2445. }
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: void
 */
  2454. static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
  2455. void *osif_vdev,
  2456. struct ol_txrx_ops *txrx_ops)
  2457. {
  2458. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2459. vdev->osif_vdev = osif_vdev;
  2460. vdev->osif_rx = txrx_ops->rx.rx;
  2461. vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
  2462. vdev->osif_get_key = txrx_ops->get_key;
  2463. vdev->osif_rx_mon = txrx_ops->rx.mon;
  2464. vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
  2465. #ifdef notyet
  2466. #if ATH_SUPPORT_WAPI
  2467. vdev->osif_check_wai = txrx_ops->rx.wai_check;
  2468. #endif
  2469. #endif
  2470. #ifdef UMAC_SUPPORT_PROXY_ARP
  2471. vdev->osif_proxy_arp = txrx_ops->proxy_arp;
  2472. #endif
  2473. vdev->me_convert = txrx_ops->me_convert;
  2474. /* TODO: Enable the following once Tx code is integrated */
  2475. txrx_ops->tx.tx = dp_tx_send;
  2476. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  2477. "DP Vdev Register success");
  2478. }
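/*
 * Illustrative sketch, not part of the driver: a typical attach-then-register
 * sequence for a vdev as an OS shim might drive it through the two functions
 * above. The wrapper name, the choice of wlan_op_mode_sta and the
 * DP_MAIN_USAGE_SKETCHES guard are hypothetical.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static struct cdp_vdev *dp_sketch_vdev_bringup(struct cdp_pdev *txrx_pdev,
	uint8_t *mac, uint8_t vdev_id, void *osif_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct cdp_vdev *vdev;

	/* Allocate the DP vdev and link it into the pdev's vdev list */
	vdev = dp_vdev_attach_wifi3(txrx_pdev, mac, vdev_id, wlan_op_mode_sta);
	if (!vdev)
		return NULL;

	/* Hand the OS layer's rx/tx callbacks to the datapath; after this
	 * call txrx_ops->tx.tx points at dp_tx_send for the caller to use.
	 */
	dp_vdev_register_wifi3(vdev, osif_vdev, txrx_ops);

	return vdev;
}
#endif /* DP_MAIN_USAGE_SKETCHES */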
/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @vdev_handle: Datapath VDEV handle
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * Return: void
 */
  2486. static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
  2487. ol_txrx_vdev_delete_cb callback, void *cb_context)
  2488. {
  2489. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2490. struct dp_pdev *pdev = vdev->pdev;
  2491. struct dp_soc *soc = pdev->soc;
  2492. /* preconditions */
  2493. qdf_assert(vdev);
  2494. /* remove the vdev from its parent pdev's list */
  2495. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  2496. /*
  2497. * Use peer_ref_mutex while accessing peer_list, in case
  2498. * a peer is in the process of being removed from the list.
  2499. */
  2500. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  2501. /* check that the vdev has no peers allocated */
  2502. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  2503. /* debug print - will be removed later */
  2504. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
FL("not deleting vdev object %pK (%pM)"
" until deletion finishes for all its peers"),
  2507. vdev, vdev->mac_addr.raw);
  2508. /* indicate that the vdev needs to be deleted */
  2509. vdev->delete.pending = 1;
  2510. vdev->delete.callback = callback;
  2511. vdev->delete.context = cb_context;
  2512. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2513. return;
  2514. }
  2515. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2516. dp_tx_flow_pool_unmap_handler(pdev, vdev->vdev_id, FLOW_TYPE_VDEV,
  2517. vdev->vdev_id);
  2518. dp_tx_vdev_detach(vdev);
  2519. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  2520. FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
  2521. qdf_mem_free(vdev);
  2522. if (callback)
  2523. callback(cb_context);
  2524. }
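/*
 * Illustrative sketch, not part of the driver: detaching a vdev with a
 * completion callback. If peers are still attached, dp_vdev_detach_wifi3()
 * only marks the vdev for deletion and the callback fires later, once the
 * last peer reference is dropped in dp_peer_unref_delete(). The wrapper
 * names and the DP_MAIN_USAGE_SKETCHES guard are hypothetical.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static void dp_sketch_vdev_delete_done(void *context)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("vdev delete completed, context %pK"), context);
}

static void dp_sketch_vdev_teardown(struct cdp_vdev *vdev)
{
	/* Completes synchronously when no peers remain, otherwise via the
	 * callback once the last peer is gone.
	 */
	dp_vdev_detach_wifi3(vdev, dp_sketch_vdev_delete_done, NULL);
}
#endif /* DP_MAIN_USAGE_SKETCHES */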
/*
 * dp_peer_create_wifi3() - attach txrx peer
 * @vdev_handle: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 *
 * Return: DP peer handle on success, NULL on failure
 */
  2532. static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
  2533. uint8_t *peer_mac_addr)
  2534. {
  2535. struct dp_peer *peer;
  2536. int i;
  2537. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2538. struct dp_pdev *pdev;
  2539. struct dp_soc *soc;
  2540. /* preconditions */
  2541. qdf_assert(vdev);
  2542. qdf_assert(peer_mac_addr);
  2543. pdev = vdev->pdev;
  2544. soc = pdev->soc;
  2545. #ifdef notyet
  2546. peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
  2547. soc->mempool_ol_ath_peer);
  2548. #else
  2549. peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
  2550. #endif
  2551. if (!peer)
  2552. return NULL; /* failure */
  2553. qdf_mem_zero(peer, sizeof(struct dp_peer));
  2554. TAILQ_INIT(&peer->ast_entry_list);
  2555. /* store provided params */
  2556. peer->vdev = vdev;
  2557. dp_peer_add_ast(soc, peer, peer_mac_addr, dp_ast_type_static);
  2558. qdf_spinlock_create(&peer->peer_info_lock);
  2559. qdf_mem_copy(
  2560. &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
/* TODO: See if rx_opt_proc is really required */
  2562. peer->rx_opt_proc = soc->rx_opt_proc;
  2563. /* initialize the peer_id */
  2564. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  2565. peer->peer_ids[i] = HTT_INVALID_PEER;
  2566. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  2567. qdf_atomic_init(&peer->ref_cnt);
  2568. /* keep one reference for attach */
  2569. qdf_atomic_inc(&peer->ref_cnt);
  2570. /* add this peer into the vdev's list */
  2571. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  2572. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2573. /* TODO: See if hash based search is required */
  2574. dp_peer_find_hash_add(soc, peer);
  2575. /* Initialize the peer state */
  2576. peer->state = OL_TXRX_PEER_STATE_DISC;
  2577. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  2578. "vdev %pK created peer %pK (%pM) ref_cnt: %d",
  2579. vdev, peer, peer->mac_addr.raw,
  2580. qdf_atomic_read(&peer->ref_cnt));
/*
 * For every peer MAP message, search and set bss_peer if the peer's
 * MAC address matches the vdev's MAC address
 */
  2584. if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
  2585. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  2586. "vdev bss_peer!!!!");
  2587. peer->bss_peer = 1;
  2588. vdev->vap_bss_peer = peer;
  2589. }
  2590. #ifndef CONFIG_WIN
  2591. dp_local_peer_id_alloc(pdev, peer);
  2592. #endif
  2593. DP_STATS_INIT(peer);
  2594. return (void *)peer;
  2595. }
/*
 * dp_peer_setup_wifi3() - initialize the peer
 * @vdev_hdl: virtual device object
 * @peer_hdl: peer object
 *
 * Return: void
 */
  2603. static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  2604. {
  2605. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  2606. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  2607. struct dp_pdev *pdev;
  2608. struct dp_soc *soc;
  2609. bool hash_based = 0;
  2610. enum cdp_host_reo_dest_ring reo_dest;
  2611. /* preconditions */
  2612. qdf_assert(vdev);
  2613. qdf_assert(peer);
  2614. pdev = vdev->pdev;
  2615. soc = pdev->soc;
  2616. dp_peer_rx_init(pdev, peer);
  2617. peer->last_assoc_rcvd = 0;
  2618. peer->last_disassoc_rcvd = 0;
  2619. peer->last_deauth_rcvd = 0;
  2620. /*
  2621. * hash based steering is disabled for Radios which are offloaded
  2622. * to NSS
  2623. */
  2624. if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
  2625. hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
  2626. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2627. FL("hash based steering for pdev: %d is %d\n"),
  2628. pdev->pdev_id, hash_based);
/*
 * The code below ensures the proper reo_dest ring is chosen
 * for cases where the toeplitz hash cannot be generated (ex: non TCP/UDP)
 */
  2633. reo_dest = pdev->reo_dest;
  2634. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  2635. /* TODO: Check the destination ring number to be passed to FW */
  2636. soc->cdp_soc.ol_ops->peer_set_default_routing(
  2637. pdev->osif_pdev, peer->mac_addr.raw,
  2638. peer->vdev->vdev_id, hash_based, reo_dest);
  2639. }
  2640. return;
  2641. }
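/*
 * Illustrative sketch, not part of the driver: the usual two-step peer
 * bring-up, pairing dp_peer_create_wifi3() with dp_peer_setup_wifi3() so the
 * rx tid state and the default REO routing get programmed. The wrapper name
 * and the DP_MAIN_USAGE_SKETCHES guard are hypothetical.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static void *dp_sketch_peer_bringup(struct cdp_vdev *vdev, uint8_t *peer_mac)
{
	void *peer;

	/* Allocates the peer, takes the attach reference and adds it to the
	 * vdev's peer list.
	 */
	peer = dp_peer_create_wifi3(vdev, peer_mac);
	if (!peer)
		return NULL;

	/* Initializes rx tids and pushes the default routing (hash based
	 * steering and pdev->reo_dest) to the target.
	 */
	dp_peer_setup_wifi3(vdev, peer);

	return peer;
}
#endif /* DP_MAIN_USAGE_SKETCHES */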
/*
 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
 * @vdev_handle: virtual device object
 * @val: packet type to use for tx encap
 *
 * Return: void
 */
  2649. static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
  2650. enum htt_cmn_pkt_type val)
  2651. {
  2652. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2653. vdev->tx_encap_type = val;
  2654. }
/*
 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
 * @vdev_handle: virtual device object
 * @val: packet type to use for rx decap
 *
 * Return: void
 */
  2662. static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
  2663. enum htt_cmn_pkt_type val)
  2664. {
  2665. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2666. vdev->rx_decap_type = val;
  2667. }
  2668. /*
  2669. * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
  2670. * @pdev_handle: physical device object
  2671. * @val: reo destination ring index (1 - 4)
  2672. *
  2673. * Return: void
  2674. */
  2675. static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
  2676. enum cdp_host_reo_dest_ring val)
  2677. {
  2678. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2679. if (pdev)
  2680. pdev->reo_dest = val;
  2681. }
  2682. /*
  2683. * dp_get_pdev_reo_dest() - get the reo destination for this pdev
  2684. * @pdev_handle: physical device object
  2685. *
  2686. * Return: reo destination ring index
  2687. */
  2688. static enum cdp_host_reo_dest_ring
  2689. dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
  2690. {
  2691. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2692. if (pdev)
  2693. return pdev->reo_dest;
  2694. else
  2695. return cdp_host_reo_dest_ring_unknown;
  2696. }
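/*
 * Illustrative sketch, not part of the driver: programming the REO
 * destination ring for a pdev through the setter above and verifying it with
 * the getter. The wrapper name and the DP_MAIN_USAGE_SKETCHES guard are
 * hypothetical.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static void dp_sketch_reo_dest_config(struct cdp_pdev *pdev,
	enum cdp_host_reo_dest_ring val)
{
	/* Select a REO destination ring (1 - 4); new peers pick this up in
	 * dp_peer_setup_wifi3() through pdev->reo_dest.
	 */
	dp_set_pdev_reo_dest(pdev, val);

	if (dp_get_pdev_reo_dest(pdev) != val)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("reo dest not configured"));
}
#endif /* DP_MAIN_USAGE_SKETCHES */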
  2697. #ifdef QCA_SUPPORT_SON
  2698. static void dp_son_peer_authorize(struct dp_peer *peer)
  2699. {
  2700. struct dp_soc *soc;
  2701. soc = peer->vdev->pdev->soc;
  2702. peer->peer_bs_inact_flag = 0;
  2703. peer->peer_bs_inact = soc->pdev_bs_inact_reload;
  2704. return;
  2705. }
  2706. #else
  2707. static void dp_son_peer_authorize(struct dp_peer *peer)
  2708. {
  2709. return;
  2710. }
  2711. #endif
/*
 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
 * @pdev_handle: device object
 * @val: value to be set
 *
 * Return: 0 on success
 */
  2719. static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  2720. uint32_t val)
  2721. {
  2722. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
/* Enable/Disable smart mesh filtering. This flag is checked
 * during rx processing to determine whether packets are from NAC clients.
 */
  2726. pdev->filter_neighbour_peers = val;
  2727. return 0;
  2728. }
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers' (NAC clients')
 * addresses for smart mesh filtering
 * @pdev_handle: device object
 * @cmd: Add/Del command
 * @macaddr: NAC client mac address
 *
 * Return: 1 on success, 0 on failure
 */
  2738. static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  2739. uint32_t cmd, uint8_t *macaddr)
  2740. {
  2741. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2742. struct dp_neighbour_peer *peer = NULL;
  2743. if (!macaddr)
  2744. goto fail0;
  2745. /* Store address of NAC (neighbour peer) which will be checked
  2746. * against TA of received packets.
  2747. */
  2748. if (cmd == DP_NAC_PARAM_ADD) {
  2749. peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
  2750. sizeof(*peer));
  2751. if (!peer) {
  2752. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2753. FL("DP neighbour peer node memory allocation failed"));
  2754. goto fail0;
  2755. }
  2756. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  2757. macaddr, DP_MAC_ADDR_LEN);
  2758. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  2759. /* add this neighbour peer into the list */
  2760. TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
  2761. neighbour_peer_list_elem);
  2762. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  2763. return 1;
  2764. } else if (cmd == DP_NAC_PARAM_DEL) {
  2765. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  2766. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  2767. neighbour_peer_list_elem) {
  2768. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  2769. macaddr, DP_MAC_ADDR_LEN)) {
  2770. /* delete this peer from the list */
  2771. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  2772. peer, neighbour_peer_list_elem);
  2773. qdf_mem_free(peer);
  2774. break;
  2775. }
  2776. }
  2777. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  2778. return 1;
  2779. }
  2780. fail0:
  2781. return 0;
  2782. }
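/*
 * Illustrative sketch, not part of the driver: the smart mesh flow the two
 * functions above support - turn on neighbour (NAC) filtering for the pdev,
 * register a NAC client's MAC address, and undo both on teardown. The
 * wrapper names and the DP_MAIN_USAGE_SKETCHES guard are hypothetical.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static void dp_sketch_nac_monitor_start(struct cdp_pdev *pdev,
	uint8_t *nac_mac)
{
	/* The rx path starts checking the packet TA against this list */
	dp_set_filter_neighbour_peers(pdev, 1);
	dp_update_filter_neighbour_peers(pdev, DP_NAC_PARAM_ADD, nac_mac);
}

static void dp_sketch_nac_monitor_stop(struct cdp_pdev *pdev,
	uint8_t *nac_mac)
{
	dp_update_filter_neighbour_peers(pdev, DP_NAC_PARAM_DEL, nac_mac);
	dp_set_filter_neighbour_peers(pdev, 0);
}
#endif /* DP_MAIN_USAGE_SKETCHES */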
/*
 * dp_get_sec_type() - Get the security type
 * @peer: Datapath peer handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * Return: sec_type: Security type
 */
  2790. static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
  2791. {
  2792. struct dp_peer *dpeer = (struct dp_peer *)peer;
  2793. return dpeer->security[sec_idx].sec_type;
  2794. }
/*
 * dp_peer_authorize() - authorize txrx peer
 * @peer_handle: Datapath peer handle
 * @authorize: authorize flag
 *
 * Return: void
 */
  2801. static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
  2802. {
  2803. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  2804. struct dp_soc *soc;
  2805. if (peer != NULL) {
  2806. soc = peer->vdev->pdev->soc;
  2807. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  2808. dp_son_peer_authorize(peer);
  2809. peer->authorize = authorize ? 1 : 0;
  2810. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2811. }
  2812. }
  2813. /*
  2814. * dp_peer_unref_delete() - unref and delete peer
  2815. * @peer_handle: Datapath peer handle
  2816. *
  2817. */
  2818. void dp_peer_unref_delete(void *peer_handle)
  2819. {
  2820. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  2821. struct dp_vdev *vdev = peer->vdev;
  2822. struct dp_pdev *pdev = vdev->pdev;
  2823. struct dp_soc *soc = pdev->soc;
  2824. struct dp_peer *tmppeer;
  2825. int found = 0;
  2826. uint16_t peer_id;
  2827. /*
  2828. * Hold the lock all the way from checking if the peer ref count
  2829. * is zero until the peer references are removed from the hash
  2830. * table and vdev list (if the peer ref count is zero).
  2831. * This protects against a new HL tx operation starting to use the
  2832. * peer object just after this function concludes it's done being used.
  2833. * Furthermore, the lock needs to be held while checking whether the
  2834. * vdev's list of peers is empty, to make sure that list is not modified
  2835. * concurrently with the empty check.
  2836. */
  2837. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  2838. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2839. "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
  2840. peer, qdf_atomic_read(&peer->ref_cnt));
  2841. if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
  2842. peer_id = peer->peer_ids[0];
  2843. /*
  2844. * Make sure that the reference to the peer in
  2845. * peer object map is removed
  2846. */
  2847. if (peer_id != HTT_INVALID_PEER)
  2848. soc->peer_id_to_obj_map[peer_id] = NULL;
  2849. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  2850. "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
  2851. /* remove the reference to the peer from the hash table */
  2852. dp_peer_find_hash_remove(soc, peer);
  2853. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  2854. if (tmppeer == peer) {
  2855. found = 1;
  2856. break;
  2857. }
  2858. }
  2859. if (found) {
  2860. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  2861. peer_list_elem);
  2862. } else {
/* Ignoring the remove operation as peer not found */
  2864. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  2865. "peer %pK not found in vdev (%pK)->peer_list:%pK",
  2866. peer, vdev, &peer->vdev->peer_list);
  2867. }
  2868. /* cleanup the peer data */
  2869. dp_peer_cleanup(vdev, peer);
  2870. /* check whether the parent vdev has no peers left */
  2871. if (TAILQ_EMPTY(&vdev->peer_list)) {
  2872. /*
  2873. * Now that there are no references to the peer, we can
  2874. * release the peer reference lock.
  2875. */
  2876. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2877. /*
  2878. * Check if the parent vdev was waiting for its peers
  2879. * to be deleted, in order for it to be deleted too.
  2880. */
  2881. if (vdev->delete.pending) {
  2882. ol_txrx_vdev_delete_cb vdev_delete_cb =
  2883. vdev->delete.callback;
  2884. void *vdev_delete_context =
  2885. vdev->delete.context;
  2886. QDF_TRACE(QDF_MODULE_ID_DP,
  2887. QDF_TRACE_LEVEL_INFO_HIGH,
  2888. FL("deleting vdev object %pK (%pM)"
  2889. " - its last peer is done"),
  2890. vdev, vdev->mac_addr.raw);
  2891. /* all peers are gone, go ahead and delete it */
  2892. qdf_mem_free(vdev);
  2893. if (vdev_delete_cb)
  2894. vdev_delete_cb(vdev_delete_context);
  2895. }
  2896. } else {
  2897. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2898. }
  2899. #ifdef notyet
  2900. qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
  2901. #else
  2902. qdf_mem_free(peer);
  2903. #endif
  2904. if (soc->cdp_soc.ol_ops->peer_unref_delete) {
  2905. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
  2906. vdev->vdev_id, peer->mac_addr.raw);
  2907. }
  2908. } else {
  2909. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  2910. }
  2911. }
/*
 * dp_peer_delete_wifi3() - Detach txrx peer
 * @peer_handle: Datapath peer handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Return: void
 */
  2918. static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
  2919. {
  2920. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  2921. /* redirect the peer's rx delivery function to point to a
  2922. * discard func
  2923. */
  2924. peer->rx_opt_proc = dp_rx_discard;
  2925. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  2926. FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);
  2927. #ifndef CONFIG_WIN
  2928. dp_local_peer_id_free(peer->vdev->pdev, peer);
  2929. #endif
  2930. qdf_spinlock_destroy(&peer->peer_info_lock);
  2931. /*
  2932. * Remove the reference added during peer_attach.
  2933. * The peer will still be left allocated until the
  2934. * PEER_UNMAP message arrives to remove the other
  2935. * reference, added by the PEER_MAP message.
  2936. */
  2937. dp_peer_unref_delete(peer_handle);
  2938. }
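/*
 * Illustrative sketch, not part of the driver: peer teardown as the control
 * path might drive it. dp_peer_delete_wifi3() only drops the reference taken
 * at attach time; the memory is released later, when dp_peer_unref_delete()
 * sees the count reach zero (normally after the PEER_UNMAP message drops the
 * map reference). The wrapper name and the DP_MAIN_USAGE_SKETCHES guard are
 * hypothetical.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static void dp_sketch_peer_teardown(void *peer_handle)
{
	/* Revoke data traffic before deleting */
	dp_peer_authorize((struct cdp_peer *)peer_handle, 0);

	/* Redirects rx to the discard handler and drops the attach ref */
	dp_peer_delete_wifi3(peer_handle, 0);
}
#endif /* DP_MAIN_USAGE_SKETCHES */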
/*
 * dp_get_vdev_mac_addr_wifi3() - Get the MAC address of the vdev
 * @pvdev: Datapath VDEV handle
 *
 * Return: pointer to the vdev's MAC address
 */
  2944. static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
  2945. {
  2946. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  2947. return vdev->mac_addr.raw;
  2948. }
/*
 * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: WDS enable value
 *
 * Return: 0 on success
 */
  2956. static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
  2957. {
  2958. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2959. vdev->wds_enabled = val;
  2960. return 0;
  2961. }
/*
 * dp_get_vdev_from_vdev_id_wifi3() - Get the vdev matching a vdev_id
 * @dev: Datapath PDEV handle
 * @vdev_id: vdev id
 *
 * Return: vdev handle on success, NULL if not found
 */
  2967. static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
  2968. uint8_t vdev_id)
  2969. {
  2970. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  2971. struct dp_vdev *vdev = NULL;
  2972. if (qdf_unlikely(!pdev))
  2973. return NULL;
  2974. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  2975. if (vdev->vdev_id == vdev_id)
  2976. break;
  2977. }
  2978. return (struct cdp_vdev *)vdev;
  2979. }
  2980. static int dp_get_opmode(struct cdp_vdev *vdev_handle)
  2981. {
  2982. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2983. return vdev->opmode;
  2984. }
  2985. static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
  2986. {
  2987. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  2988. struct dp_pdev *pdev = vdev->pdev;
  2989. return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
  2990. }
  2991. /**
  2992. * dp_reset_monitor_mode() - Disable monitor mode
  2993. * @pdev_handle: Datapath PDEV handle
  2994. *
  2995. * Return: 0 on success, not 0 on failure
  2996. */
  2997. static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
  2998. {
  2999. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  3000. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  3001. struct dp_soc *soc;
  3002. uint8_t pdev_id;
  3003. pdev_id = pdev->pdev_id;
  3004. soc = pdev->soc;
  3005. pdev->monitor_vdev = NULL;
  3006. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  3007. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  3008. pdev->rxdma_mon_buf_ring.hal_srng,
  3009. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
  3010. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  3011. pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
  3012. RX_BUFFER_SIZE, &htt_tlv_filter);
  3013. return 0;
  3014. }
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @vdev_handle: Datapath VDEV handle
 * @smart_monitor: Flag to denote if it is smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
  3022. static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
  3023. uint8_t smart_monitor)
  3024. {
/* Many monitor VAPs can exist in a system, but only one can be up at
 * any time
 */
  3028. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  3029. struct dp_pdev *pdev;
  3030. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  3031. struct dp_soc *soc;
  3032. uint8_t pdev_id;
  3033. qdf_assert(vdev);
  3034. pdev = vdev->pdev;
  3035. pdev_id = pdev->pdev_id;
  3036. soc = pdev->soc;
  3037. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  3038. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
  3039. pdev, pdev_id, soc, vdev);
/* Check if the current pdev's monitor_vdev exists */
  3041. if (pdev->monitor_vdev) {
  3042. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3043. "vdev=%pK\n", vdev);
  3044. qdf_assert(vdev);
  3045. }
  3046. pdev->monitor_vdev = vdev;
  3047. /* If smart monitor mode, do not configure monitor ring */
  3048. if (smart_monitor)
  3049. return QDF_STATUS_SUCCESS;
  3050. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  3051. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
  3052. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  3053. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  3054. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  3055. pdev->mo_data_filter);
  3056. htt_tlv_filter.mpdu_start = 1;
  3057. htt_tlv_filter.msdu_start = 1;
  3058. htt_tlv_filter.packet = 1;
  3059. htt_tlv_filter.msdu_end = 1;
  3060. htt_tlv_filter.mpdu_end = 1;
  3061. htt_tlv_filter.packet_header = 1;
  3062. htt_tlv_filter.attention = 1;
  3063. htt_tlv_filter.ppdu_start = 0;
  3064. htt_tlv_filter.ppdu_end = 0;
  3065. htt_tlv_filter.ppdu_end_user_stats = 0;
  3066. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  3067. htt_tlv_filter.ppdu_end_status_done = 0;
  3068. htt_tlv_filter.header_per_msdu = 1;
  3069. htt_tlv_filter.enable_fp =
  3070. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  3071. htt_tlv_filter.enable_md = 0;
  3072. htt_tlv_filter.enable_mo =
  3073. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  3074. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  3075. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  3076. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  3077. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  3078. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  3079. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  3080. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  3081. pdev->rxdma_mon_buf_ring.hal_srng,
  3082. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
  3083. htt_tlv_filter.mpdu_start = 1;
  3084. htt_tlv_filter.msdu_start = 1;
  3085. htt_tlv_filter.packet = 0;
  3086. htt_tlv_filter.msdu_end = 1;
  3087. htt_tlv_filter.mpdu_end = 1;
  3088. htt_tlv_filter.packet_header = 1;
  3089. htt_tlv_filter.attention = 1;
  3090. htt_tlv_filter.ppdu_start = 1;
  3091. htt_tlv_filter.ppdu_end = 1;
  3092. htt_tlv_filter.ppdu_end_user_stats = 1;
  3093. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  3094. htt_tlv_filter.ppdu_end_status_done = 1;
  3095. htt_tlv_filter.header_per_msdu = 0;
  3096. htt_tlv_filter.enable_fp =
  3097. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  3098. htt_tlv_filter.enable_md = 0;
  3099. htt_tlv_filter.enable_mo =
  3100. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  3101. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  3102. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  3103. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  3104. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  3105. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  3106. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  3107. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  3108. pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
  3109. RX_BUFFER_SIZE, &htt_tlv_filter);
  3110. return QDF_STATUS_SUCCESS;
  3111. }
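/*
 * Illustrative sketch, not part of the driver: toggling full monitor mode.
 * Passing smart_monitor = 0 makes dp_vdev_set_monitor_mode() program the
 * monitor buf/status ring TLV filters; dp_reset_monitor_mode() clears them
 * again. The wrapper name and the DP_MAIN_USAGE_SKETCHES guard are
 * hypothetical.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static int dp_sketch_monitor_toggle(struct cdp_pdev *pdev,
	struct cdp_vdev *mon_vdev, bool enable)
{
	if (enable)
		return dp_vdev_set_monitor_mode(mon_vdev, 0);

	/* Clears the TLV filters and forgets pdev->monitor_vdev */
	return dp_reset_monitor_mode(pdev);
}
#endif /* DP_MAIN_USAGE_SKETCHES */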
/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @pdev_handle: Datapath PDEV handle
 * @filter_val: Flag to select filter for monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
  3118. static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
  3119. struct cdp_monitor_filter *filter_val)
  3120. {
/* Many monitor VAPs can exist in a system, but only one can be up at
 * any time
 */
  3124. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  3125. struct dp_vdev *vdev = pdev->monitor_vdev;
  3126. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  3127. struct dp_soc *soc;
  3128. uint8_t pdev_id;
  3129. pdev_id = pdev->pdev_id;
  3130. soc = pdev->soc;
  3131. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  3132. "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
  3133. pdev, pdev_id, soc, vdev);
/* Check if the current pdev's monitor_vdev exists */
  3135. if (!pdev->monitor_vdev) {
  3136. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3137. "vdev=%pK\n", vdev);
  3138. qdf_assert(vdev);
  3139. }
  3140. /* update filter mode, type in pdev structure */
  3141. pdev->mon_filter_mode = filter_val->mode;
  3142. pdev->fp_mgmt_filter = filter_val->fp_mgmt;
  3143. pdev->fp_ctrl_filter = filter_val->fp_ctrl;
  3144. pdev->fp_data_filter = filter_val->fp_data;
  3145. pdev->mo_mgmt_filter = filter_val->mo_mgmt;
  3146. pdev->mo_ctrl_filter = filter_val->mo_ctrl;
  3147. pdev->mo_data_filter = filter_val->mo_data;
  3148. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  3149. "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
  3150. pdev->mon_filter_mode, pdev->fp_mgmt_filter,
  3151. pdev->fp_ctrl_filter, pdev->fp_data_filter,
  3152. pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
  3153. pdev->mo_data_filter);
  3154. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  3155. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  3156. pdev->rxdma_mon_buf_ring.hal_srng,
  3157. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
  3158. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  3159. pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
  3160. RX_BUFFER_SIZE, &htt_tlv_filter);
  3161. htt_tlv_filter.mpdu_start = 1;
  3162. htt_tlv_filter.msdu_start = 1;
  3163. htt_tlv_filter.packet = 1;
  3164. htt_tlv_filter.msdu_end = 1;
  3165. htt_tlv_filter.mpdu_end = 1;
  3166. htt_tlv_filter.packet_header = 1;
  3167. htt_tlv_filter.attention = 1;
  3168. htt_tlv_filter.ppdu_start = 0;
  3169. htt_tlv_filter.ppdu_end = 0;
  3170. htt_tlv_filter.ppdu_end_user_stats = 0;
  3171. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  3172. htt_tlv_filter.ppdu_end_status_done = 0;
  3173. htt_tlv_filter.header_per_msdu = 1;
  3174. htt_tlv_filter.enable_fp =
  3175. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  3176. htt_tlv_filter.enable_md = 0;
  3177. htt_tlv_filter.enable_mo =
  3178. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  3179. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  3180. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  3181. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  3182. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  3183. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  3184. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  3185. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  3186. pdev->rxdma_mon_buf_ring.hal_srng, RXDMA_MONITOR_BUF,
  3187. RX_BUFFER_SIZE, &htt_tlv_filter);
  3188. htt_tlv_filter.mpdu_start = 1;
  3189. htt_tlv_filter.msdu_start = 1;
  3190. htt_tlv_filter.packet = 0;
  3191. htt_tlv_filter.msdu_end = 1;
  3192. htt_tlv_filter.mpdu_end = 1;
  3193. htt_tlv_filter.packet_header = 1;
  3194. htt_tlv_filter.attention = 1;
  3195. htt_tlv_filter.ppdu_start = 1;
  3196. htt_tlv_filter.ppdu_end = 1;
  3197. htt_tlv_filter.ppdu_end_user_stats = 1;
  3198. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  3199. htt_tlv_filter.ppdu_end_status_done = 1;
  3200. htt_tlv_filter.header_per_msdu = 0;
  3201. htt_tlv_filter.enable_fp =
  3202. (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
  3203. htt_tlv_filter.enable_md = 0;
  3204. htt_tlv_filter.enable_mo =
  3205. (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
  3206. htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
  3207. htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
  3208. htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
  3209. htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
  3210. htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
  3211. htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
  3212. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  3213. pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
  3214. RX_BUFFER_SIZE, &htt_tlv_filter);
  3215. return QDF_STATUS_SUCCESS;
  3216. }
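/*
 * Illustrative sketch, not part of the driver: applying an advanced monitor
 * filter. The cdp_monitor_filter fields shown are exactly the ones consumed
 * above; mirroring the same values into the FP (filter pass) and MO (monitor
 * other) fields is only an example, and the wrapper name plus the
 * DP_MAIN_USAGE_SKETCHES guard are hypothetical.
 */
#ifdef DP_MAIN_USAGE_SKETCHES
static int dp_sketch_apply_mon_filter(struct cdp_pdev *pdev, uint32_t mode,
	uint32_t mgmt, uint32_t ctrl, uint32_t data)
{
	struct cdp_monitor_filter filter_val = {0};

	filter_val.mode = mode;
	filter_val.fp_mgmt = mgmt;
	filter_val.fp_ctrl = ctrl;
	filter_val.fp_data = data;
	filter_val.mo_mgmt = mgmt;
	filter_val.mo_ctrl = ctrl;
	filter_val.mo_data = data;

	return dp_pdev_set_advance_monitor_filter(pdev, &filter_val);
}
#endif /* DP_MAIN_USAGE_SKETCHES */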
  3217. #ifdef MESH_MODE_SUPPORT
  3218. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  3219. {
  3220. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  3221. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3222. FL("val %d"), val);
  3223. vdev->mesh_vdev = val;
  3224. }
  3225. /*
  3226. * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
  3227. * @vdev_hdl: virtual device object
  3228. * @val: value to be set
  3229. *
  3230. * Return: void
  3231. */
  3232. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  3233. {
  3234. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  3235. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3236. FL("val %d"), val);
  3237. vdev->mesh_rx_filter = val;
  3238. }
  3239. #endif
/*
 * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
 * Current scope is bar received count
 *
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
  3248. #define STATS_PROC_TIMEOUT (HZ/10)
  3249. static void
  3250. dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
  3251. {
  3252. struct dp_vdev *vdev;
  3253. struct dp_peer *peer;
  3254. uint32_t waitcnt;
  3255. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  3256. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3257. if (!peer) {
  3258. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("DP Invalid Peer reference"));
  3260. return;
  3261. }
  3262. waitcnt = 0;
  3263. dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
  3264. while (!(qdf_atomic_read(&(pdev->stats.cmd_complete)))
  3265. && waitcnt < 10) {
  3266. schedule_timeout_interruptible(
  3267. STATS_PROC_TIMEOUT);
  3268. waitcnt++;
  3269. }
  3270. qdf_atomic_set(&(pdev->stats.cmd_complete), 0);
  3271. }
  3272. }
  3273. }
  3274. /**
  3275. * dp_rx_bar_stats_cb(): BAR received stats callback
  3276. * @soc: SOC handle
  3277. * @cb_ctxt: Call back context
  3278. * @reo_status: Reo status
  3279. *
  3280. * return: void
  3281. */
  3282. void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  3283. union hal_reo_status *reo_status)
  3284. {
  3285. struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
  3286. struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
  3287. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
  3288. DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
  3289. queue_status->header.status);
  3290. qdf_atomic_set(&(pdev->stats.cmd_complete), 1);
  3291. return;
  3292. }
  3293. pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
  3294. qdf_atomic_set(&(pdev->stats.cmd_complete), 1);
  3295. }
  3296. /**
  3297. * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
  3298. * @vdev: DP VDEV handle
  3299. *
  3300. * return: void
  3301. */
  3302. void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
  3303. {
  3304. struct dp_peer *peer = NULL;
  3305. struct dp_soc *soc = vdev->pdev->soc;
  3306. int i;
  3307. uint8_t pream_type;
  3308. qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
  3309. qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
  3310. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3311. for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
  3312. for (i = 0; i < MAX_MCS; i++) {
  3313. DP_STATS_AGGR(vdev, peer,
  3314. tx.pkt_type[pream_type].mcs_count[i]);
  3315. DP_STATS_AGGR(vdev, peer,
  3316. rx.pkt_type[pream_type].mcs_count[i]);
  3317. }
  3318. }
  3319. for (i = 0; i < MAX_BW; i++) {
  3320. DP_STATS_AGGR(vdev, peer, tx.bw[i]);
  3321. DP_STATS_AGGR(vdev, peer, rx.bw[i]);
  3322. }
  3323. for (i = 0; i < SS_COUNT; i++)
  3324. DP_STATS_AGGR(vdev, peer, rx.nss[i]);
  3325. for (i = 0; i < WME_AC_MAX; i++) {
  3326. DP_STATS_AGGR(vdev, peer, tx.wme_ac_type[i]);
  3327. DP_STATS_AGGR(vdev, peer, rx.wme_ac_type[i]);
  3328. DP_STATS_AGGR(vdev, peer, tx.excess_retries_ac[i]);
  3329. }
  3330. for (i = 0; i < MAX_GI; i++) {
  3331. DP_STATS_AGGR(vdev, peer, tx.sgi_count[i]);
  3332. DP_STATS_AGGR(vdev, peer, rx.sgi_count[i]);
  3333. }
  3334. DP_STATS_AGGR_PKT(vdev, peer, tx.comp_pkt);
  3335. DP_STATS_AGGR_PKT(vdev, peer, tx.ucast);
  3336. DP_STATS_AGGR_PKT(vdev, peer, tx.mcast);
  3337. DP_STATS_AGGR_PKT(vdev, peer, tx.tx_success);
  3338. DP_STATS_AGGR(vdev, peer, tx.tx_failed);
  3339. DP_STATS_AGGR(vdev, peer, tx.ofdma);
  3340. DP_STATS_AGGR(vdev, peer, tx.stbc);
  3341. DP_STATS_AGGR(vdev, peer, tx.ldpc);
  3342. DP_STATS_AGGR(vdev, peer, tx.retries);
  3343. DP_STATS_AGGR(vdev, peer, tx.non_amsdu_cnt);
  3344. DP_STATS_AGGR(vdev, peer, tx.amsdu_cnt);
  3345. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_rem);
  3346. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_rem_tx);
  3347. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_rem_notx);
  3348. DP_STATS_AGGR(vdev, peer, tx.dropped.age_out);
  3349. DP_STATS_AGGR(vdev, peer, rx.err.mic_err);
  3350. DP_STATS_AGGR(vdev, peer, rx.err.decrypt_err);
  3351. DP_STATS_AGGR(vdev, peer, rx.non_ampdu_cnt);
  3352. DP_STATS_AGGR(vdev, peer, rx.ampdu_cnt);
  3353. DP_STATS_AGGR(vdev, peer, rx.non_amsdu_cnt);
  3354. DP_STATS_AGGR(vdev, peer, rx.amsdu_cnt);
  3355. DP_STATS_AGGR_PKT(vdev, peer, rx.to_stack);
  3356. for (i = 0; i < CDP_MAX_RX_RINGS; i++)
  3357. DP_STATS_AGGR_PKT(vdev, peer, rx.rcvd_reo[i]);
  3358. peer->stats.rx.unicast.num = peer->stats.rx.to_stack.num -
  3359. peer->stats.rx.multicast.num;
  3360. peer->stats.rx.unicast.bytes = peer->stats.rx.to_stack.bytes -
  3361. peer->stats.rx.multicast.bytes;
  3362. DP_STATS_AGGR_PKT(vdev, peer, rx.unicast);
  3363. DP_STATS_AGGR_PKT(vdev, peer, rx.multicast);
  3364. DP_STATS_AGGR_PKT(vdev, peer, rx.wds);
  3365. DP_STATS_AGGR_PKT(vdev, peer, rx.raw);
  3366. DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss.pkts);
  3367. DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss.fail);
  3368. vdev->stats.tx.last_ack_rssi =
  3369. peer->stats.tx.last_ack_rssi;
  3370. }
  3371. if (soc->cdp_soc.ol_ops->update_dp_stats)
  3372. soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
  3373. &vdev->stats, vdev->vdev_id, UPDATE_VDEV_STATS);
  3374. }
  3375. /**
  3376. * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
  3377. * @pdev: DP PDEV handle
  3378. *
  3379. * return: void
  3380. */
  3381. static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
  3382. {
  3383. struct dp_vdev *vdev = NULL;
  3384. uint8_t i;
  3385. uint8_t pream_type;
  3386. qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
  3387. qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
  3388. qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
  3389. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  3390. dp_aggregate_vdev_stats(vdev);
  3391. for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
  3392. for (i = 0; i < MAX_MCS; i++) {
  3393. DP_STATS_AGGR(pdev, vdev,
  3394. tx.pkt_type[pream_type].mcs_count[i]);
  3395. DP_STATS_AGGR(pdev, vdev,
  3396. rx.pkt_type[pream_type].mcs_count[i]);
  3397. }
  3398. }
  3399. for (i = 0; i < MAX_BW; i++) {
  3400. DP_STATS_AGGR(pdev, vdev, tx.bw[i]);
  3401. DP_STATS_AGGR(pdev, vdev, rx.bw[i]);
  3402. }
  3403. for (i = 0; i < SS_COUNT; i++)
  3404. DP_STATS_AGGR(pdev, vdev, rx.nss[i]);
  3405. for (i = 0; i < WME_AC_MAX; i++) {
  3406. DP_STATS_AGGR(pdev, vdev, tx.wme_ac_type[i]);
  3407. DP_STATS_AGGR(pdev, vdev, rx.wme_ac_type[i]);
  3408. DP_STATS_AGGR(pdev, vdev,
  3409. tx.excess_retries_ac[i]);
  3410. }
  3411. for (i = 0; i < MAX_GI; i++) {
  3412. DP_STATS_AGGR(pdev, vdev, tx.sgi_count[i]);
  3413. DP_STATS_AGGR(pdev, vdev, rx.sgi_count[i]);
  3414. }
  3415. DP_STATS_AGGR_PKT(pdev, vdev, tx.comp_pkt);
  3416. DP_STATS_AGGR_PKT(pdev, vdev, tx.ucast);
  3417. DP_STATS_AGGR_PKT(pdev, vdev, tx.mcast);
  3418. DP_STATS_AGGR_PKT(pdev, vdev, tx.tx_success);
  3419. DP_STATS_AGGR(pdev, vdev, tx.tx_failed);
  3420. DP_STATS_AGGR(pdev, vdev, tx.ofdma);
  3421. DP_STATS_AGGR(pdev, vdev, tx.stbc);
  3422. DP_STATS_AGGR(pdev, vdev, tx.ldpc);
  3423. DP_STATS_AGGR(pdev, vdev, tx.retries);
  3424. DP_STATS_AGGR(pdev, vdev, tx.non_amsdu_cnt);
  3425. DP_STATS_AGGR(pdev, vdev, tx.amsdu_cnt);
  3426. DP_STATS_AGGR(pdev, vdev, tx.dropped.fw_rem);
  3427. DP_STATS_AGGR(pdev, vdev, tx.dropped.fw_rem_tx);
  3428. DP_STATS_AGGR(pdev, vdev, tx.dropped.fw_rem_notx);
  3429. DP_STATS_AGGR(pdev, vdev, tx.dropped.age_out);
  3430. DP_STATS_AGGR(pdev, vdev, rx.err.mic_err);
  3431. DP_STATS_AGGR(pdev, vdev, rx.err.decrypt_err);
  3432. DP_STATS_AGGR(pdev, vdev, rx.non_ampdu_cnt);
  3433. DP_STATS_AGGR(pdev, vdev, rx.ampdu_cnt);
  3434. DP_STATS_AGGR(pdev, vdev, rx.non_amsdu_cnt);
  3435. DP_STATS_AGGR(pdev, vdev, rx.amsdu_cnt);
  3436. DP_STATS_AGGR_PKT(pdev, vdev, rx.to_stack);
  3437. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[0]);
  3438. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[1]);
  3439. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[2]);
  3440. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[3]);
  3441. DP_STATS_AGGR_PKT(pdev, vdev, rx.unicast);
  3442. DP_STATS_AGGR_PKT(pdev, vdev, rx.multicast);
  3443. DP_STATS_AGGR_PKT(pdev, vdev, rx.wds);
  3444. DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.pkts);
  3445. DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.fail);
  3446. DP_STATS_AGGR_PKT(pdev, vdev, rx.raw);
  3447. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
  3448. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
  3449. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
  3450. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
  3451. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
  3452. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
  3453. DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
  3454. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
  3455. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
  3456. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
  3457. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
  3458. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
  3459. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
  3460. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
  3461. DP_STATS_AGGR(pdev, vdev,
  3462. tx_i.mcast_en.dropped_map_error);
  3463. DP_STATS_AGGR(pdev, vdev,
  3464. tx_i.mcast_en.dropped_self_mac);
  3465. DP_STATS_AGGR(pdev, vdev,
  3466. tx_i.mcast_en.dropped_send_fail);
  3467. DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
  3468. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
  3469. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
  3470. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
  3471. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
  3472. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
  3473. DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
  3474. DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
  3475. pdev->stats.tx_i.dropped.dropped_pkt.num =
  3476. pdev->stats.tx_i.dropped.dma_error +
  3477. pdev->stats.tx_i.dropped.ring_full +
  3478. pdev->stats.tx_i.dropped.enqueue_fail +
  3479. pdev->stats.tx_i.dropped.desc_na +
  3480. pdev->stats.tx_i.dropped.res_full;
  3481. pdev->stats.tx.last_ack_rssi =
  3482. vdev->stats.tx.last_ack_rssi;
  3483. pdev->stats.tx_i.tso.num_seg =
  3484. vdev->stats.tx_i.tso.num_seg;
  3485. }
  3486. }
  3487. /**
  3488. * dp_print_pdev_tx_stats(): Print Pdev level TX stats
  3489. * @pdev: DP_PDEV Handle
  3490. *
  3491. * Return:void
  3492. */
  3493. static inline void
  3494. dp_print_pdev_tx_stats(struct dp_pdev *pdev)
  3495. {
  3496. DP_PRINT_STATS("PDEV Tx Stats:\n");
  3497. DP_PRINT_STATS("Received From Stack:");
  3498. DP_PRINT_STATS(" Packets = %d",
  3499. pdev->stats.tx_i.rcvd.num);
  3500. DP_PRINT_STATS(" Bytes = %d",
  3501. pdev->stats.tx_i.rcvd.bytes);
  3502. DP_PRINT_STATS("Processed:");
  3503. DP_PRINT_STATS(" Packets = %d",
  3504. pdev->stats.tx_i.processed.num);
  3505. DP_PRINT_STATS(" Bytes = %d",
  3506. pdev->stats.tx_i.processed.bytes);
  3507. DP_PRINT_STATS("Completions:");
  3508. DP_PRINT_STATS(" Packets = %d",
  3509. pdev->stats.tx.comp_pkt.num);
  3510. DP_PRINT_STATS(" Bytes = %d",
  3511. pdev->stats.tx.comp_pkt.bytes);
  3512. DP_PRINT_STATS("Dropped:");
  3513. DP_PRINT_STATS(" Total = %d",
  3514. pdev->stats.tx_i.dropped.dropped_pkt.num);
  3515. DP_PRINT_STATS(" Dma_map_error = %d",
  3516. pdev->stats.tx_i.dropped.dma_error);
  3517. DP_PRINT_STATS(" Ring Full = %d",
  3518. pdev->stats.tx_i.dropped.ring_full);
  3519. DP_PRINT_STATS(" Descriptor Not available = %d",
  3520. pdev->stats.tx_i.dropped.desc_na);
  3521. DP_PRINT_STATS(" HW enqueue failed= %d",
  3522. pdev->stats.tx_i.dropped.enqueue_fail);
  3523. DP_PRINT_STATS(" Resources Full = %d",
  3524. pdev->stats.tx_i.dropped.res_full);
  3525. DP_PRINT_STATS(" FW removed = %d",
  3526. pdev->stats.tx.dropped.fw_rem);
  3527. DP_PRINT_STATS(" FW removed transmitted = %d",
  3528. pdev->stats.tx.dropped.fw_rem_tx);
  3529. DP_PRINT_STATS(" FW removed untransmitted = %d",
  3530. pdev->stats.tx.dropped.fw_rem_notx);
  3531. DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
  3532. pdev->stats.tx.dropped.age_out);
  3533. DP_PRINT_STATS("Scatter Gather:");
  3534. DP_PRINT_STATS(" Packets = %d",
  3535. pdev->stats.tx_i.sg.sg_pkt.num);
  3536. DP_PRINT_STATS(" Bytes = %d",
  3537. pdev->stats.tx_i.sg.sg_pkt.bytes);
  3538. DP_PRINT_STATS(" Dropped By Host = %d",
  3539. pdev->stats.tx_i.sg.dropped_host);
  3540. DP_PRINT_STATS(" Dropped By Target = %d",
  3541. pdev->stats.tx_i.sg.dropped_target);
  3542. DP_PRINT_STATS("TSO:");
  3543. DP_PRINT_STATS(" Number of Segments = %d",
  3544. pdev->stats.tx_i.tso.num_seg);
  3545. DP_PRINT_STATS(" Packets = %d",
  3546. pdev->stats.tx_i.tso.tso_pkt.num);
  3547. DP_PRINT_STATS(" Bytes = %d",
  3548. pdev->stats.tx_i.tso.tso_pkt.bytes);
  3549. DP_PRINT_STATS(" Dropped By Host = %d",
  3550. pdev->stats.tx_i.tso.dropped_host);
  3551. DP_PRINT_STATS("Mcast Enhancement:");
  3552. DP_PRINT_STATS(" Packets = %d",
  3553. pdev->stats.tx_i.mcast_en.mcast_pkt.num);
  3554. DP_PRINT_STATS(" Bytes = %d",
  3555. pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
  3556. DP_PRINT_STATS(" Dropped: Map Errors = %d",
  3557. pdev->stats.tx_i.mcast_en.dropped_map_error);
  3558. DP_PRINT_STATS(" Dropped: Self Mac = %d",
  3559. pdev->stats.tx_i.mcast_en.dropped_self_mac);
  3560. DP_PRINT_STATS(" Dropped: Send Fail = %d",
  3561. pdev->stats.tx_i.mcast_en.dropped_send_fail);
  3562. DP_PRINT_STATS(" Unicast sent = %d",
  3563. pdev->stats.tx_i.mcast_en.ucast);
  3564. DP_PRINT_STATS("Raw:");
  3565. DP_PRINT_STATS(" Packets = %d",
  3566. pdev->stats.tx_i.raw.raw_pkt.num);
  3567. DP_PRINT_STATS(" Bytes = %d",
  3568. pdev->stats.tx_i.raw.raw_pkt.bytes);
  3569. DP_PRINT_STATS(" DMA map error = %d",
  3570. pdev->stats.tx_i.raw.dma_map_error);
  3571. DP_PRINT_STATS("Reinjected:");
  3572. DP_PRINT_STATS(" Packets = %d",
  3573. pdev->stats.tx_i.reinject_pkts.num);
  3574. DP_PRINT_STATS("Bytes = %d\n",
  3575. pdev->stats.tx_i.reinject_pkts.bytes);
  3576. DP_PRINT_STATS("Inspected:");
  3577. DP_PRINT_STATS(" Packets = %d",
  3578. pdev->stats.tx_i.inspect_pkts.num);
  3579. DP_PRINT_STATS(" Bytes = %d",
  3580. pdev->stats.tx_i.inspect_pkts.bytes);
  3581. DP_PRINT_STATS("Nawds Multicast:");
  3582. DP_PRINT_STATS(" Packets = %d",
  3583. pdev->stats.tx_i.nawds_mcast.num);
  3584. DP_PRINT_STATS(" Bytes = %d",
  3585. pdev->stats.tx_i.nawds_mcast.bytes);
  3586. DP_PRINT_STATS("CCE Classified:");
  3587. DP_TRACE(FATAL, " CCE Classified Packets: %u",
  3588. pdev->stats.tx_i.cce_classified);
  3589. DP_TRACE(FATAL, " RAW CCE Classified Packets: %u",
  3590. pdev->stats.tx_i.cce_classified_raw);
  3591. }
  3592. /**
  3593. * dp_print_pdev_rx_stats(): Print Pdev level RX stats
  3594. * @pdev: DP_PDEV Handle
  3595. *
  3596. * Return: void
  3597. */
  3598. static inline void
  3599. dp_print_pdev_rx_stats(struct dp_pdev *pdev)
  3600. {
  3601. DP_PRINT_STATS("PDEV Rx Stats:\n");
  3602. DP_PRINT_STATS("Received From HW (Per Rx Ring):");
  3603. DP_PRINT_STATS(" Packets = %d %d %d %d",
  3604. pdev->stats.rx.rcvd_reo[0].num,
  3605. pdev->stats.rx.rcvd_reo[1].num,
  3606. pdev->stats.rx.rcvd_reo[2].num,
  3607. pdev->stats.rx.rcvd_reo[3].num);
  3608. DP_PRINT_STATS(" Bytes = %d %d %d %d",
  3609. pdev->stats.rx.rcvd_reo[0].bytes,
  3610. pdev->stats.rx.rcvd_reo[1].bytes,
  3611. pdev->stats.rx.rcvd_reo[2].bytes,
  3612. pdev->stats.rx.rcvd_reo[3].bytes);
  3613. DP_PRINT_STATS("Replenished:");
  3614. DP_PRINT_STATS(" Packets = %d",
  3615. pdev->stats.replenish.pkts.num);
  3616. DP_PRINT_STATS(" Bytes = %d",
  3617. pdev->stats.replenish.pkts.bytes);
  3618. DP_PRINT_STATS(" Buffers Added To Freelist = %d",
  3619. pdev->stats.buf_freelist);
  3620. DP_PRINT_STATS(" Low threshold intr = %d",
  3621. pdev->stats.replenish.low_thresh_intrs);
  3622. DP_PRINT_STATS("Dropped:");
  3623. DP_PRINT_STATS(" msdu_not_done = %d",
  3624. pdev->stats.dropped.msdu_not_done);
  3625. DP_PRINT_STATS("Sent To Stack:");
  3626. DP_PRINT_STATS(" Packets = %d",
  3627. pdev->stats.rx.to_stack.num);
  3628. DP_PRINT_STATS(" Bytes = %d",
  3629. pdev->stats.rx.to_stack.bytes);
  3630. DP_PRINT_STATS("Multicast/Broadcast:");
  3631. DP_PRINT_STATS(" Packets = %d",
  3632. pdev->stats.rx.multicast.num);
  3633. DP_PRINT_STATS(" Bytes = %d",
  3634. pdev->stats.rx.multicast.bytes);
  3635. DP_PRINT_STATS("Errors:");
DP_PRINT_STATS(" Rxdma Ring Un-initialized = %d",
pdev->stats.replenish.rxdma_err);
DP_PRINT_STATS(" Desc Alloc Failed = %d",
pdev->stats.err.desc_alloc_fail);
  3640. /* Get bar_recv_cnt */
  3641. dp_aggregate_pdev_ctrl_frames_stats(pdev);
  3642. DP_PRINT_STATS("BAR Received Count: = %d",
  3643. pdev->stats.rx.bar_recv_cnt);
  3644. }
/**
 * dp_print_soc_tx_stats(): Print SOC level Tx stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
  3651. static inline void
  3652. dp_print_soc_tx_stats(struct dp_soc *soc)
  3653. {
  3654. DP_PRINT_STATS("SOC Tx Stats:\n");
  3655. DP_PRINT_STATS("Tx Descriptors In Use = %d",
  3656. soc->stats.tx.desc_in_use);
  3657. DP_PRINT_STATS("Invalid peer:");
  3658. DP_PRINT_STATS(" Packets = %d",
  3659. soc->stats.tx.tx_invalid_peer.num);
  3660. DP_PRINT_STATS(" Bytes = %d",
  3661. soc->stats.tx.tx_invalid_peer.bytes);
  3662. DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
  3663. soc->stats.tx.tcl_ring_full[0],
  3664. soc->stats.tx.tcl_ring_full[1],
  3665. soc->stats.tx.tcl_ring_full[2]);
  3666. }
  3667. /**
  3668. * dp_print_soc_rx_stats: Print SOC level Rx stats
  3669. * @soc: DP_SOC Handle
  3670. *
  3671. * Return:void
  3672. */
  3673. static inline void
  3674. dp_print_soc_rx_stats(struct dp_soc *soc)
  3675. {
  3676. uint32_t i;
  3677. char reo_error[DP_REO_ERR_LENGTH];
  3678. char rxdma_error[DP_RXDMA_ERR_LENGTH];
  3679. uint8_t index = 0;
  3680. DP_PRINT_STATS("SOC Rx Stats:\n");
  3681. DP_PRINT_STATS("Errors:\n");
  3682. DP_PRINT_STATS("Rx Decrypt Errors = %d",
  3683. (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
  3684. soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
  3685. DP_PRINT_STATS("Invalid RBM = %d",
  3686. soc->stats.rx.err.invalid_rbm);
  3687. DP_PRINT_STATS("Invalid Vdev = %d",
  3688. soc->stats.rx.err.invalid_vdev);
  3689. DP_PRINT_STATS("Invalid Pdev = %d",
  3690. soc->stats.rx.err.invalid_pdev);
  3691. DP_PRINT_STATS("Invalid Peer = %d",
  3692. soc->stats.rx.err.rx_invalid_peer.num);
  3693. DP_PRINT_STATS("HAL Ring Access Fail = %d",
  3694. soc->stats.rx.err.hal_ring_access_fail);
  3695. for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
  3696. index += qdf_snprint(&rxdma_error[index],
  3697. DP_RXDMA_ERR_LENGTH - index,
  3698. " %d", soc->stats.rx.err.rxdma_error[i]);
  3699. }
  3700. DP_PRINT_STATS("RXDMA Error (0-31):%s",
  3701. rxdma_error);
  3702. index = 0;
  3703. for (i = 0; i < HAL_REO_ERR_MAX; i++) {
  3704. index += qdf_snprint(&reo_error[index],
  3705. DP_REO_ERR_LENGTH - index,
  3706. " %d", soc->stats.rx.err.reo_error[i]);
  3707. }
  3708. DP_PRINT_STATS("REO Error(0-14):%s",
  3709. reo_error);
  3710. }
  3711. /**
  3712. * dp_print_ring_stat_from_hal(): Print hal level ring stats
  3713. * @soc: DP_SOC handle
  3714. * @srng: DP_SRNG handle
  3715. * @ring_name: SRNG name
  3716. *
  3717. * Return: void
  3718. */
  3719. static inline void
  3720. dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
  3721. char *ring_name)
  3722. {
  3723. uint32_t tailp;
  3724. uint32_t headp;
  3725. if (srng->hal_srng != NULL) {
  3726. hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
  3727. DP_PRINT_STATS("%s : Head pointer = %d Tail Pointer = %d\n",
  3728. ring_name, headp, tailp);
  3729. }
  3730. }
  3731. /**
  3732. * dp_print_ring_stats(): Print tail and head pointer
  3733. * @pdev: DP_PDEV handle
  3734. *
  3735. * Return:void
  3736. */
  3737. static inline void
  3738. dp_print_ring_stats(struct dp_pdev *pdev)
  3739. {
  3740. uint32_t i;
  3741. char ring_name[STR_MAXLEN + 1];
  3742. dp_print_ring_stat_from_hal(pdev->soc,
  3743. &pdev->soc->reo_exception_ring,
  3744. "Reo Exception Ring");
  3745. dp_print_ring_stat_from_hal(pdev->soc,
  3746. &pdev->soc->reo_reinject_ring,
  3747. "Reo Inject Ring");
  3748. dp_print_ring_stat_from_hal(pdev->soc,
  3749. &pdev->soc->reo_cmd_ring,
  3750. "Reo Command Ring");
  3751. dp_print_ring_stat_from_hal(pdev->soc,
  3752. &pdev->soc->reo_status_ring,
  3753. "Reo Status Ring");
  3754. dp_print_ring_stat_from_hal(pdev->soc,
  3755. &pdev->soc->rx_rel_ring,
  3756. "Rx Release ring");
  3757. dp_print_ring_stat_from_hal(pdev->soc,
  3758. &pdev->soc->tcl_cmd_ring,
  3759. "Tcl command Ring");
  3760. dp_print_ring_stat_from_hal(pdev->soc,
  3761. &pdev->soc->tcl_status_ring,
  3762. "Tcl Status Ring");
  3763. dp_print_ring_stat_from_hal(pdev->soc,
  3764. &pdev->soc->wbm_desc_rel_ring,
  3765. "Wbm Desc Rel Ring");
  3766. for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
  3767. snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
  3768. dp_print_ring_stat_from_hal(pdev->soc,
  3769. &pdev->soc->reo_dest_ring[i],
  3770. ring_name);
  3771. }
  3772. for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
  3773. snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
  3774. dp_print_ring_stat_from_hal(pdev->soc,
  3775. &pdev->soc->tcl_data_ring[i],
  3776. ring_name);
  3777. }
  3778. for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
  3779. snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
  3780. dp_print_ring_stat_from_hal(pdev->soc,
  3781. &pdev->soc->tx_comp_ring[i],
  3782. ring_name);
  3783. }
  3784. dp_print_ring_stat_from_hal(pdev->soc,
  3785. &pdev->rx_refill_buf_ring,
  3786. "Rx Refill Buf Ring");
  3787. dp_print_ring_stat_from_hal(pdev->soc,
  3788. &pdev->rx_refill_buf_ring2,
  3789. "Second Rx Refill Buf Ring");
  3790. dp_print_ring_stat_from_hal(pdev->soc,
  3791. &pdev->rxdma_mon_buf_ring,
  3792. "Rxdma Mon Buf Ring");
  3793. dp_print_ring_stat_from_hal(pdev->soc,
  3794. &pdev->rxdma_mon_dst_ring,
  3795. "Rxdma Mon Dst Ring");
  3796. dp_print_ring_stat_from_hal(pdev->soc,
  3797. &pdev->rxdma_mon_status_ring,
  3798. "Rxdma Mon Status Ring");
  3799. dp_print_ring_stat_from_hal(pdev->soc,
  3800. &pdev->rxdma_mon_desc_ring,
  3801. "Rxdma mon desc Ring");
  3802. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
  3803. snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
  3804. dp_print_ring_stat_from_hal(pdev->soc,
  3805. &pdev->rxdma_err_dst_ring[i],
  3806. ring_name);
  3807. }
  3808. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
  3809. snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
  3810. dp_print_ring_stat_from_hal(pdev->soc,
  3811. &pdev->rx_mac_buf_ring[i],
  3812. ring_name);
  3813. }
  3814. }
  3815. /**
  3816. * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
  3817. * @vdev: DP_VDEV handle
  3818. *
  3819. * Return:void
  3820. */
  3821. static inline void
  3822. dp_txrx_host_stats_clr(struct dp_vdev *vdev)
  3823. {
  3824. struct dp_peer *peer = NULL;
  3825. DP_STATS_CLR(vdev->pdev);
  3826. DP_STATS_CLR(vdev->pdev->soc);
  3827. DP_STATS_CLR(vdev);
  3828. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3829. if (!peer)
  3830. return;
  3831. DP_STATS_CLR(peer);
  3832. }
  3833. }
  3834. /**
  3835. * dp_print_rx_rates(): Print Rx rate stats
  3836. * @vdev: DP_VDEV handle
  3837. *
  3838. * Return:void
  3839. */
  3840. static inline void
  3841. dp_print_rx_rates(struct dp_vdev *vdev)
  3842. {
  3843. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  3844. uint8_t i, mcs, pkt_type;
  3845. uint8_t index = 0;
  3846. char nss[DP_NSS_LENGTH];
  3847. DP_PRINT_STATS("Rx Rate Info:\n");
  3848. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  3849. index = 0;
  3850. for (mcs = 0; mcs < MAX_MCS; mcs++) {
  3851. if (!dp_rate_string[pkt_type][mcs].valid)
  3852. continue;
  3853. DP_PRINT_STATS(" %s = %d",
  3854. dp_rate_string[pkt_type][mcs].mcs_type,
  3855. pdev->stats.rx.pkt_type[pkt_type].
  3856. mcs_count[mcs]);
  3857. }
  3858. DP_PRINT_STATS("\n");
  3859. }
  3860. index = 0;
  3861. for (i = 0; i < SS_COUNT; i++) {
  3862. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  3863. " %d", pdev->stats.rx.nss[i]);
  3864. }
  3865. DP_PRINT_STATS("NSS(0-7) = %s",
  3866. nss);
  3867. DP_PRINT_STATS("SGI ="
  3868. " 0.8us %d,"
  3869. " 0.4us %d,"
  3870. " 1.6us %d,"
  3871. " 3.2us %d,",
  3872. pdev->stats.rx.sgi_count[0],
  3873. pdev->stats.rx.sgi_count[1],
  3874. pdev->stats.rx.sgi_count[2],
  3875. pdev->stats.rx.sgi_count[3]);
  3876. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  3877. pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
  3878. pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
  3879. DP_PRINT_STATS("Reception Type ="
  3880. " SU: %d,"
  3881. " MU_MIMO:%d,"
  3882. " MU_OFDMA:%d,"
  3883. " MU_OFDMA_MIMO:%d\n",
  3884. pdev->stats.rx.reception_type[0],
  3885. pdev->stats.rx.reception_type[1],
  3886. pdev->stats.rx.reception_type[2],
  3887. pdev->stats.rx.reception_type[3]);
  3888. DP_PRINT_STATS("Aggregation:\n");
  3889. DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
  3890. pdev->stats.rx.ampdu_cnt);
  3891. DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
  3892. pdev->stats.rx.non_ampdu_cnt);
  3893. DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
  3894. pdev->stats.rx.amsdu_cnt);
  3895. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
  3896. pdev->stats.rx.non_amsdu_cnt);
  3897. }
  3898. /**
  3899. * dp_print_tx_rates(): Print tx rates
  3900. * @vdev: DP_VDEV handle
  3901. *
  3902. * Return:void
  3903. */
  3904. static inline void
  3905. dp_print_tx_rates(struct dp_vdev *vdev)
  3906. {
  3907. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  3908. uint8_t mcs, pkt_type;
  3909. uint32_t index;
  3910. DP_PRINT_STATS("Tx Rate Info:\n");
  3911. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  3912. index = 0;
  3913. for (mcs = 0; mcs < MAX_MCS; mcs++) {
  3914. if (!dp_rate_string[pkt_type][mcs].valid)
  3915. continue;
  3916. DP_PRINT_STATS(" %s = %d",
  3917. dp_rate_string[pkt_type][mcs].mcs_type,
  3918. pdev->stats.tx.pkt_type[pkt_type].
  3919. mcs_count[mcs]);
  3920. }
  3921. DP_PRINT_STATS("\n");
  3922. }
  3923. DP_PRINT_STATS("SGI ="
  3924. " 0.8us %d"
  3925. " 0.4us %d"
  3926. " 1.6us %d"
  3927. " 3.2us %d",
  3928. pdev->stats.tx.sgi_count[0],
  3929. pdev->stats.tx.sgi_count[1],
  3930. pdev->stats.tx.sgi_count[2],
  3931. pdev->stats.tx.sgi_count[3]);
  3932. DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  3933. pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
  3934. pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
  3935. DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
  3936. DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
  3937. DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
  3938. DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
  3939. DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
  3940. DP_PRINT_STATS("Aggregation:\n");
  3941. DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
  3942. pdev->stats.tx.amsdu_cnt);
  3943. DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
  3944. pdev->stats.tx.non_amsdu_cnt);
  3945. }
  3946. /**
  3947. * dp_print_peer_stats():print peer stats
  3948. * @peer: DP_PEER handle
  3949. *
  3950. * return void
  3951. */
  3952. static inline void dp_print_peer_stats(struct dp_peer *peer)
  3953. {
  3954. uint8_t i, mcs, pkt_type;
  3955. uint32_t index;
  3956. char nss[DP_NSS_LENGTH];
  3957. DP_PRINT_STATS("Node Tx Stats:\n");
  3958. DP_PRINT_STATS("Total Packet Completions = %d",
  3959. peer->stats.tx.comp_pkt.num);
  3960. DP_PRINT_STATS("Total Bytes Completions = %d",
  3961. peer->stats.tx.comp_pkt.bytes);
  3962. DP_PRINT_STATS("Success Packets = %d",
  3963. peer->stats.tx.tx_success.num);
  3964. DP_PRINT_STATS("Success Bytes = %d",
  3965. peer->stats.tx.tx_success.bytes);
  3966. DP_PRINT_STATS("Unicast Success Packets = %d",
  3967. peer->stats.tx.ucast.num);
  3968. DP_PRINT_STATS("Unicast Success Bytes = %d",
  3969. peer->stats.tx.ucast.bytes);
  3970. DP_PRINT_STATS("Multicast Success Packets = %d",
  3971. peer->stats.tx.mcast.num);
  3972. DP_PRINT_STATS("Multicast Success Bytes = %d",
  3973. peer->stats.tx.mcast.bytes);
  3974. DP_PRINT_STATS("Packets Failed = %d",
  3975. peer->stats.tx.tx_failed);
  3976. DP_PRINT_STATS("Packets In OFDMA = %d",
  3977. peer->stats.tx.ofdma);
  3978. DP_PRINT_STATS("Packets In STBC = %d",
  3979. peer->stats.tx.stbc);
  3980. DP_PRINT_STATS("Packets In LDPC = %d",
  3981. peer->stats.tx.ldpc);
  3982. DP_PRINT_STATS("Packet Retries = %d",
  3983. peer->stats.tx.retries);
  3984. DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
  3985. peer->stats.tx.amsdu_cnt);
  3986. DP_PRINT_STATS("Last Packet RSSI = %d",
  3987. peer->stats.tx.last_ack_rssi);
  3988. DP_PRINT_STATS("Dropped At FW: Removed = %d",
  3989. peer->stats.tx.dropped.fw_rem);
  3990. DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
  3991. peer->stats.tx.dropped.fw_rem_tx);
  3992. DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
  3993. peer->stats.tx.dropped.fw_rem_notx);
  3994. DP_PRINT_STATS("Dropped : Age Out = %d",
  3995. peer->stats.tx.dropped.age_out);
  3996. DP_PRINT_STATS("NAWDS : ");
  3997. DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
  3998. peer->stats.tx.nawds_mcast_drop);
  3999. DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
  4000. peer->stats.tx.nawds_mcast.num);
  4001. DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %d",
  4002. peer->stats.tx.nawds_mcast.bytes);
  4003. DP_PRINT_STATS("Rate Info:");
  4004. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  4005. index = 0;
  4006. for (mcs = 0; mcs < MAX_MCS; mcs++) {
  4007. if (!dp_rate_string[pkt_type][mcs].valid)
  4008. continue;
  4009. DP_PRINT_STATS(" %s = %d",
  4010. dp_rate_string[pkt_type][mcs].mcs_type,
  4011. peer->stats.tx.pkt_type[pkt_type].
  4012. mcs_count[mcs]);
  4013. }
  4014. DP_PRINT_STATS("\n");
  4015. }
  4016. DP_PRINT_STATS("SGI = "
  4017. " 0.8us %d"
  4018. " 0.4us %d"
  4019. " 1.6us %d"
  4020. " 3.2us %d",
  4021. peer->stats.tx.sgi_count[0],
  4022. peer->stats.tx.sgi_count[1],
  4023. peer->stats.tx.sgi_count[2],
  4024. peer->stats.tx.sgi_count[3]);
  4025. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
  4026. peer->stats.tx.bw[2], peer->stats.tx.bw[3],
  4027. peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
  4028. DP_PRINT_STATS("Aggregation:");
  4029. DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
  4030. peer->stats.tx.amsdu_cnt);
  4031. DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
  4032. peer->stats.tx.non_amsdu_cnt);
  4033. DP_PRINT_STATS("Node Rx Stats:");
  4034. DP_PRINT_STATS("Packets Sent To Stack = %d",
  4035. peer->stats.rx.to_stack.num);
  4036. DP_PRINT_STATS("Bytes Sent To Stack = %d",
  4037. peer->stats.rx.to_stack.bytes);
  4038. for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
  4039. DP_PRINT_STATS("Ring Id = %d", i);
  4040. DP_PRINT_STATS(" Packets Received = %d",
  4041. peer->stats.rx.rcvd_reo[i].num);
  4042. DP_PRINT_STATS(" Bytes Received = %d",
  4043. peer->stats.rx.rcvd_reo[i].bytes);
  4044. }
  4045. DP_PRINT_STATS("Multicast Packets Received = %d",
  4046. peer->stats.rx.multicast.num);
  4047. DP_PRINT_STATS("Multicast Bytes Received = %d",
  4048. peer->stats.rx.multicast.bytes);
  4049. DP_PRINT_STATS("WDS Packets Received = %d",
  4050. peer->stats.rx.wds.num);
  4051. DP_PRINT_STATS("WDS Bytes Received = %d",
  4052. peer->stats.rx.wds.bytes);
  4053. DP_PRINT_STATS("Intra BSS Packets Received = %d",
  4054. peer->stats.rx.intra_bss.pkts.num);
  4055. DP_PRINT_STATS("Intra BSS Bytes Received = %d",
  4056. peer->stats.rx.intra_bss.pkts.bytes);
  4057. DP_PRINT_STATS("Raw Packets Received = %d",
  4058. peer->stats.rx.raw.num);
  4059. DP_PRINT_STATS("Raw Bytes Received = %d",
  4060. peer->stats.rx.raw.bytes);
  4061. DP_PRINT_STATS("Errors: MIC Errors = %d",
  4062. peer->stats.rx.err.mic_err);
  4063. DP_PRINT_STATS("Erros: Decryption Errors = %d",
  4064. peer->stats.rx.err.decrypt_err);
  4065. DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
  4066. peer->stats.rx.non_ampdu_cnt);
  4067. DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
  4068. peer->stats.rx.ampdu_cnt);
  4069. DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
  4070. peer->stats.rx.non_amsdu_cnt);
  4071. DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
  4072. peer->stats.rx.amsdu_cnt);
  4073. DP_PRINT_STATS("NAWDS : ");
  4074. DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
  4075. peer->stats.rx.nawds_mcast_drop.num);
  4076. DP_PRINT_STATS(" Nawds multicast Drop Rx Packet Bytes = %d",
  4077. peer->stats.rx.nawds_mcast_drop.bytes);
  4078. DP_PRINT_STATS("SGI ="
  4079. " 0.8us %d"
  4080. " 0.4us %d"
  4081. " 1.6us %d"
  4082. " 3.2us %d",
  4083. peer->stats.rx.sgi_count[0],
  4084. peer->stats.rx.sgi_count[1],
  4085. peer->stats.rx.sgi_count[2],
  4086. peer->stats.rx.sgi_count[3]);
  4087. DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
  4088. peer->stats.rx.bw[0], peer->stats.rx.bw[1],
  4089. peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
  4090. DP_PRINT_STATS("Reception Type ="
  4091. " SU %d,"
  4092. " MU_MIMO %d,"
  4093. " MU_OFDMA %d,"
  4094. " MU_OFDMA_MIMO %d",
  4095. peer->stats.rx.reception_type[0],
  4096. peer->stats.rx.reception_type[1],
  4097. peer->stats.rx.reception_type[2],
  4098. peer->stats.rx.reception_type[3]);
  4099. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  4100. index = 0;
  4101. for (mcs = 0; mcs < MAX_MCS; mcs++) {
  4102. if (!dp_rate_string[pkt_type][mcs].valid)
  4103. continue;
  4104. DP_PRINT_STATS(" %s = %d",
  4105. dp_rate_string[pkt_type][mcs].mcs_type,
  4106. peer->stats.rx.pkt_type[pkt_type].
  4107. mcs_count[mcs]);
  4108. }
  4109. DP_PRINT_STATS("\n");
  4110. }
  4111. index = 0;
  4112. for (i = 0; i < SS_COUNT; i++) {
  4113. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  4114. " %d", peer->stats.rx.nss[i]);
  4115. }
  4116. DP_PRINT_STATS("NSS(0-7) = %s",
  4117. nss);
  4118. DP_PRINT_STATS("Aggregation:");
  4119. DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
  4120. peer->stats.rx.ampdu_cnt);
  4121. DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
  4122. peer->stats.rx.non_ampdu_cnt);
  4123. DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
  4124. peer->stats.rx.amsdu_cnt);
  4125. DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
  4126. peer->stats.rx.non_amsdu_cnt);
  4127. }
  4128. /**
  4129. * dp_print_host_stats()- Function to print the stats aggregated at host
  4130. * @vdev_handle: DP_VDEV handle
  4131. * @type: host stats type
  4132. *
  4133. * Available Stat types
  4134. * TXRX_CLEAR_STATS : Clear the stats
  4135. * TXRX_RX_RATE_STATS: Print Rx Rate Info
  4136. * TXRX_TX_RATE_STATS: Print Tx Rate Info
  4137. * TXRX_TX_HOST_STATS: Print Tx Stats
  4138. * TXRX_RX_HOST_STATS: Print Rx Stats
  4139. * TXRX_AST_STATS: Print AST Stats
  4140. * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
  4141. *
4142. * Return: 0 on success; prints an error message on invalid input
  4143. */
  4144. static int
  4145. dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
  4146. {
  4147. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4148. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  4149. dp_aggregate_pdev_stats(pdev);
  4150. switch (type) {
  4151. case TXRX_CLEAR_STATS:
  4152. dp_txrx_host_stats_clr(vdev);
  4153. break;
  4154. case TXRX_RX_RATE_STATS:
  4155. dp_print_rx_rates(vdev);
  4156. break;
  4157. case TXRX_TX_RATE_STATS:
  4158. dp_print_tx_rates(vdev);
  4159. break;
  4160. case TXRX_TX_HOST_STATS:
  4161. dp_print_pdev_tx_stats(pdev);
  4162. dp_print_soc_tx_stats(pdev->soc);
  4163. break;
  4164. case TXRX_RX_HOST_STATS:
  4165. dp_print_pdev_rx_stats(pdev);
  4166. dp_print_soc_rx_stats(pdev->soc);
  4167. break;
  4168. case TXRX_AST_STATS:
  4169. dp_print_ast_stats(pdev->soc);
  4170. break;
  4171. case TXRX_SRNG_PTR_STATS:
  4172. dp_print_ring_stats(pdev);
  4173. break;
  4174. default:
  4175. DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
  4176. break;
  4177. }
  4178. return 0;
  4179. }
  4180. /*
  4181. * dp_get_host_peer_stats()- function to print peer stats
  4182. * @pdev_handle: DP_PDEV handle
  4183. * @mac_addr: mac address of the peer
  4184. *
  4185. * Return: void
  4186. */
  4187. static void
  4188. dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
  4189. {
  4190. struct dp_peer *peer;
  4191. uint8_t local_id;
  4192. peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
  4193. &local_id);
  4194. if (!peer) {
  4195. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  4196. "%s: Invalid peer\n", __func__);
  4197. return;
  4198. }
  4199. dp_print_peer_stats(peer);
  4200. dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
  4201. return;
  4202. }
  4203. /*
  4204. * dp_ppdu_ring_reset()- Reset PPDU Stats ring
  4205. * @pdev: DP_PDEV handle
  4206. *
  4207. * Return: void
  4208. */
  4209. static void
  4210. dp_ppdu_ring_reset(struct dp_pdev *pdev)
  4211. {
  4212. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  4213. qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
  4214. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, pdev->pdev_id,
  4215. pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
  4216. RX_BUFFER_SIZE, &htt_tlv_filter);
  4217. }
  4218. /*
  4219. * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
  4220. * @pdev: DP_PDEV handle
  4221. *
  4222. * Return: void
  4223. */
  4224. static void
  4225. dp_ppdu_ring_cfg(struct dp_pdev *pdev)
  4226. {
  4227. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  4228. htt_tlv_filter.mpdu_start = 0;
  4229. htt_tlv_filter.msdu_start = 0;
  4230. htt_tlv_filter.packet = 0;
  4231. htt_tlv_filter.msdu_end = 0;
  4232. htt_tlv_filter.mpdu_end = 0;
  4233. htt_tlv_filter.packet_header = 1;
  4234. htt_tlv_filter.attention = 1;
  4235. htt_tlv_filter.ppdu_start = 1;
  4236. htt_tlv_filter.ppdu_end = 1;
  4237. htt_tlv_filter.ppdu_end_user_stats = 1;
  4238. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  4239. htt_tlv_filter.ppdu_end_status_done = 1;
  4240. htt_tlv_filter.enable_fp = 1;
  4241. htt_tlv_filter.enable_md = 0;
  4242. htt_tlv_filter.enable_mo = 0;
  4243. htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
  4244. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
  4245. htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
  4246. htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
  4247. htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
  4248. htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
  4249. htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, pdev->pdev_id,
  4250. pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
  4251. RX_BUFFER_SIZE, &htt_tlv_filter);
  4252. }
  4253. /*
  4254. * dp_config_debug_sniffer()- API to enable/disable debug sniffer
  4255. * @pdev_handle: DP_PDEV handle
  4256. * @val: user provided value
  4257. *
  4258. * Return: void
  4259. */
  4260. static void
  4261. dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
  4262. {
  4263. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4264. switch (val) {
  4265. case 0:
  4266. pdev->tx_sniffer_enable = 0;
  4267. pdev->mcopy_mode = 0;
  4268. if (!pdev->enhanced_stats_en) {
  4269. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  4270. dp_ppdu_ring_reset(pdev);
  4271. }
  4272. break;
  4273. case 1:
  4274. pdev->tx_sniffer_enable = 1;
  4275. pdev->mcopy_mode = 0;
  4276. if (!pdev->enhanced_stats_en)
  4277. dp_h2t_cfg_stats_msg_send(pdev,
  4278. DP_PPDU_STATS_CFG_ALL, pdev->pdev_id);
  4279. break;
  4280. case 2:
  4281. pdev->mcopy_mode = 1;
  4282. pdev->tx_sniffer_enable = 0;
  4283. if (!pdev->enhanced_stats_en) {
  4284. dp_ppdu_ring_cfg(pdev);
  4285. dp_h2t_cfg_stats_msg_send(pdev,
  4286. DP_PPDU_STATS_CFG_ALL, pdev->pdev_id);
  4287. }
  4288. break;
  4289. default:
  4290. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4291. "Invalid value\n");
  4292. break;
  4293. }
  4294. }
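/*
 * Illustrative usage (editorial sketch, not part of the original driver):
 * val 0 disables sniffing, 1 enables the tx sniffer and 2 enables M-copy
 * mode, as handled above. A control-path caller would typically reach this
 * through dp_set_pdev_param() further below; the pdev handle name here is
 * hypothetical.
 *
 *	struct cdp_pdev *pdev_handle;       // obtained at pdev attach
 *
 *	dp_set_pdev_param(pdev_handle, CDP_CONFIG_DEBUG_SNIFFER, 1); // tx sniffer
 *	dp_set_pdev_param(pdev_handle, CDP_CONFIG_DEBUG_SNIFFER, 2); // M-copy
 *	dp_set_pdev_param(pdev_handle, CDP_CONFIG_DEBUG_SNIFFER, 0); // disable
 */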
  4295. /*
4296. * dp_enable_enhanced_stats()- API to enable enhanced statistics
  4297. * @pdev_handle: DP_PDEV handle
  4298. *
  4299. * Return: void
  4300. */
  4301. static void
  4302. dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
  4303. {
  4304. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4305. pdev->enhanced_stats_en = 1;
  4306. if (!pdev->mcopy_mode)
  4307. dp_ppdu_ring_cfg(pdev);
  4308. if (!pdev->tx_sniffer_enable && !pdev->mcopy_mode)
  4309. dp_h2t_cfg_stats_msg_send(pdev, 0xffff, pdev->pdev_id);
  4310. }
  4311. /*
4312. * dp_disable_enhanced_stats()- API to disable enhanced statistics
  4313. * @pdev_handle: DP_PDEV handle
  4314. *
  4315. * Return: void
  4316. */
  4317. static void
  4318. dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
  4319. {
  4320. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4321. pdev->enhanced_stats_en = 0;
  4322. if (!pdev->tx_sniffer_enable && !pdev->mcopy_mode)
  4323. dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
  4324. if (!pdev->mcopy_mode)
  4325. dp_ppdu_ring_reset(pdev);
  4326. }
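/*
 * Illustrative flow (editorial sketch): the enable/disable pair above
 * brackets an enhanced-stats collection window; the PPDU status ring
 * configuration is skipped while the tx sniffer or M-copy mode already owns
 * it. The pdev handle below is hypothetical.
 *
 *	dp_enable_enhanced_stats(pdev_handle);
 *	// ... run traffic; PPDU stats indications are processed by the host ...
 *	dp_disable_enhanced_stats(pdev_handle);
 */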
  4327. /*
  4328. * dp_get_fw_peer_stats()- function to print peer stats
  4329. * @pdev_handle: DP_PDEV handle
  4330. * @mac_addr: mac address of the peer
  4331. * @cap: Type of htt stats requested
  4332. *
4333. * Currently supporting only MAC ID based requests. Supported cap values:
  4334. * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
  4335. * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
  4336. * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
  4337. *
  4338. * Return: void
  4339. */
  4340. static void
  4341. dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
  4342. uint32_t cap)
  4343. {
  4344. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4345. int i;
  4346. uint32_t config_param0 = 0;
  4347. uint32_t config_param1 = 0;
  4348. uint32_t config_param2 = 0;
  4349. uint32_t config_param3 = 0;
  4350. HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
  4351. config_param0 |= (1 << (cap + 1));
  4352. for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
  4353. config_param1 |= (1 << i);
  4354. }
  4355. config_param2 |= (mac_addr[0] & 0x000000ff);
  4356. config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
  4357. config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
  4358. config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
  4359. config_param3 |= (mac_addr[4] & 0x000000ff);
  4360. config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
  4361. dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
  4362. config_param0, config_param1, config_param2,
  4363. config_param3, 0);
  4364. }
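/*
 * Worked example (editorial): for a peer MAC address of aa:bb:cc:dd:ee:ff,
 * i.e. mac_addr[0] == 0xaa ... mac_addr[5] == 0xff, the packing above yields
 *
 *	config_param2 = 0xddccbbaa;	// bytes 0..3, byte i at bit 8*i
 *	config_param3 = 0x0000ffee;	// bytes 4..5 in the low half-word
 *
 * so the firmware can reassemble the address from the two 32-bit parameters.
 */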
  4365. /* This struct definition will be removed from here
4366. * once it gets added in FW headers */
  4367. struct httstats_cmd_req {
  4368. uint32_t config_param0;
  4369. uint32_t config_param1;
  4370. uint32_t config_param2;
  4371. uint32_t config_param3;
  4372. int cookie;
  4373. u_int8_t stats_id;
  4374. };
  4375. /*
4376. * dp_get_htt_stats: function to process the httstats request
  4377. * @pdev_handle: DP pdev handle
  4378. * @data: pointer to request data
  4379. * @data_len: length for request data
  4380. *
  4381. * return: void
  4382. */
  4383. static void
  4384. dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
  4385. {
  4386. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  4387. struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
  4388. QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
  4389. dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
  4390. req->config_param0, req->config_param1,
  4391. req->config_param2, req->config_param3,
  4392. req->cookie);
  4393. }
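/*
 * Illustrative request (editorial sketch): callers hand dp_get_htt_stats()
 * a buffer laid out as struct httstats_cmd_req. The field values below are
 * placeholders, not values mandated by this file; stats_id would be one of
 * the HTT_DBG_EXT_STATS_* identifiers.
 *
 *	struct httstats_cmd_req req = {
 *		.config_param0 = 0,
 *		.config_param1 = 0,
 *		.config_param2 = 0,
 *		.config_param3 = 0,
 *		.cookie        = 0,
 *		.stats_id      = 0,
 *	};
 *
 *	dp_get_htt_stats(pdev_handle, &req, sizeof(req));
 */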
  4394. /*
  4395. * dp_set_pdev_param: function to set parameters in pdev
  4396. * @pdev_handle: DP pdev handle
  4397. * @param: parameter type to be set
  4398. * @val: value of parameter to be set
  4399. *
  4400. * return: void
  4401. */
  4402. static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
  4403. enum cdp_pdev_param_type param, uint8_t val)
  4404. {
  4405. switch (param) {
  4406. case CDP_CONFIG_DEBUG_SNIFFER:
  4407. dp_config_debug_sniffer(pdev_handle, val);
  4408. break;
  4409. default:
  4410. break;
  4411. }
  4412. }
  4413. /*
  4414. * dp_set_vdev_param: function to set parameters in vdev
  4415. * @param: parameter type to be set
  4416. * @val: value of parameter to be set
  4417. *
  4418. * return: void
  4419. */
  4420. static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
  4421. enum cdp_vdev_param_type param, uint32_t val)
  4422. {
  4423. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4424. switch (param) {
  4425. case CDP_ENABLE_WDS:
  4426. vdev->wds_enabled = val;
  4427. break;
  4428. case CDP_ENABLE_NAWDS:
  4429. vdev->nawds_enabled = val;
  4430. break;
  4431. case CDP_ENABLE_MCAST_EN:
  4432. vdev->mcast_enhancement_en = val;
  4433. break;
  4434. case CDP_ENABLE_PROXYSTA:
  4435. vdev->proxysta_vdev = val;
  4436. break;
  4437. case CDP_UPDATE_TDLS_FLAGS:
  4438. vdev->tdls_link_connected = val;
  4439. break;
  4440. case CDP_CFG_WDS_AGING_TIMER:
  4441. if (val == 0)
  4442. qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
  4443. else if (val != vdev->wds_aging_timer_val)
  4444. qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
  4445. vdev->wds_aging_timer_val = val;
  4446. break;
  4447. case CDP_ENABLE_AP_BRIDGE:
  4448. if (wlan_op_mode_sta != vdev->opmode)
  4449. vdev->ap_bridge_enabled = val;
  4450. else
  4451. vdev->ap_bridge_enabled = false;
  4452. break;
  4453. case CDP_ENABLE_CIPHER:
  4454. vdev->sec_type = val;
  4455. break;
  4456. case CDP_ENABLE_QWRAP_ISOLATION:
  4457. vdev->isolation_vdev = val;
  4458. break;
  4459. default:
  4460. break;
  4461. }
  4462. dp_tx_vdev_update_search_flags(vdev);
  4463. }
  4464. /**
  4465. * dp_peer_set_nawds: set nawds bit in peer
  4466. * @peer_handle: pointer to peer
  4467. * @value: enable/disable nawds
  4468. *
  4469. * return: void
  4470. */
  4471. static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
  4472. {
  4473. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4474. peer->nawds_enabled = value;
  4475. }
  4476. /*
  4477. * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
  4478. * @vdev_handle: DP_VDEV handle
  4479. * @map_id:ID of map that needs to be updated
  4480. *
  4481. * Return: void
  4482. */
  4483. static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
  4484. uint8_t map_id)
  4485. {
  4486. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4487. vdev->dscp_tid_map_id = map_id;
  4488. return;
  4489. }
  4490. /**
  4491. * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
  4492. * @pdev: DP_PDEV handle
  4493. * @map_id: ID of map that needs to be updated
  4494. * @tos: index value in map
  4495. * @tid: tid value passed by the user
  4496. *
  4497. * Return: void
  4498. */
  4499. static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
  4500. uint8_t map_id, uint8_t tos, uint8_t tid)
  4501. {
  4502. uint8_t dscp;
  4503. struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
  4504. dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  4505. pdev->dscp_tid_map[map_id][dscp] = tid;
  4506. if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
  4507. hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
  4508. map_id, dscp);
  4509. return;
  4510. }
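/*
 * Worked example (editorial): assuming the conventional IP TOS layout where
 * DSCP is the upper six bits (DP_IP_DSCP_SHIFT == 2, DP_IP_DSCP_MASK == 0x3f),
 * a tos of 0xb8 gives
 *
 *	dscp = (0xb8 >> 2) & 0x3f = 0x2e = 46 (EF),
 *
 * so the caller's tid is stored at dscp_tid_map[map_id][46] and, for maps
 * maintained in hardware, also pushed to HAL for that dscp value.
 */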
  4511. /**
  4512. * dp_fw_stats_process(): Process TxRX FW stats request
  4513. * @vdev_handle: DP VDEV handle
  4514. * @req: stats request
  4515. *
  4516. * return: int
  4517. */
  4518. static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
  4519. struct cdp_txrx_stats_req *req)
  4520. {
  4521. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4522. struct dp_pdev *pdev = NULL;
  4523. uint32_t stats = req->stats;
  4524. if (!vdev) {
  4525. DP_TRACE(NONE, "VDEV not found");
  4526. return 1;
  4527. }
  4528. pdev = vdev->pdev;
  4529. return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
  4530. req->param1, req->param2, req->param3, 0);
  4531. }
  4532. /**
  4533. * dp_txrx_stats_request - function to map to firmware and host stats
  4534. * @vdev: virtual handle
  4535. * @req: stats request
  4536. *
  4537. * Return: integer
  4538. */
  4539. static int dp_txrx_stats_request(struct cdp_vdev *vdev,
  4540. struct cdp_txrx_stats_req *req)
  4541. {
  4542. int host_stats;
  4543. int fw_stats;
  4544. enum cdp_stats stats;
  4545. if (!vdev || !req) {
  4546. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4547. "Invalid vdev/req instance");
  4548. return 0;
  4549. }
  4550. stats = req->stats;
  4551. if (stats >= CDP_TXRX_MAX_STATS)
  4552. return 0;
  4553. /*
4554. * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
4555. * has to be updated whenever new FW HTT stats are added
  4556. */
  4557. if (stats > CDP_TXRX_STATS_HTT_MAX)
  4558. stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
  4559. fw_stats = dp_stats_mapping_table[stats][STATS_FW];
  4560. host_stats = dp_stats_mapping_table[stats][STATS_HOST];
  4561. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4562. "stats: %u fw_stats_type: %d host_stats_type: %d",
  4563. stats, fw_stats, host_stats);
  4564. if (fw_stats != TXRX_FW_STATS_INVALID) {
  4565. /* update request with FW stats type */
  4566. req->stats = fw_stats;
  4567. return dp_fw_stats_process(vdev, req);
  4568. }
  4569. if ((host_stats != TXRX_HOST_STATS_INVALID) &&
  4570. (host_stats <= TXRX_HOST_STATS_MAX))
  4571. return dp_print_host_stats(vdev, host_stats);
  4572. else
  4573. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4574. "Wrong Input for TxRx Stats");
  4575. return 0;
  4576. }
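/*
 * Worked example (editorial sketch): stats ids beyond the HTT range are
 * renumbered so host-only entries still index dp_stats_mapping_table
 * correctly. With hypothetical values DP_HTT_DBG_EXT_STATS_MAX = 256 and
 * DP_CURR_FW_STATS_AVAIL = 19, a request id of 260 becomes
 *
 *	stats = 260 + 19 - 256 = 23,
 *
 * and row 23 of the mapping table then supplies the FW and host stat types.
 */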
  4577. /**
  4578. * dp_txrx_stats() - function to map to firmware and host stats
  4579. * @vdev: virtual handle
  4580. * @stats: type of statistics requested
  4581. *
  4582. * Return: integer
  4583. */
  4584. static int dp_txrx_stats(struct cdp_vdev *vdev, enum cdp_stats stats)
  4585. {
  4586. struct cdp_txrx_stats_req req = {0,};
  4587. req.stats = stats;
  4588. return dp_txrx_stats_request(vdev, &req);
  4589. }
  4590. /*
  4591. * dp_print_napi_stats(): NAPI stats
  4592. * @soc - soc handle
  4593. */
  4594. static void dp_print_napi_stats(struct dp_soc *soc)
  4595. {
  4596. hif_print_napi_stats(soc->hif_handle);
  4597. }
  4598. /*
  4599. * dp_print_per_ring_stats(): Packet count per ring
  4600. * @soc - soc handle
  4601. */
  4602. static void dp_print_per_ring_stats(struct dp_soc *soc)
  4603. {
  4604. uint8_t ring;
  4605. uint16_t core;
  4606. uint64_t total_packets;
  4607. DP_TRACE(FATAL, "Reo packets per ring:");
  4608. for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
  4609. total_packets = 0;
  4610. DP_TRACE(FATAL, "Packets on ring %u:", ring);
  4611. for (core = 0; core < NR_CPUS; core++) {
  4612. DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
  4613. core, soc->stats.rx.ring_packets[core][ring]);
  4614. total_packets += soc->stats.rx.ring_packets[core][ring];
  4615. }
  4616. DP_TRACE(FATAL, "Total packets on ring %u: %llu",
  4617. ring, total_packets);
  4618. }
  4619. }
  4620. /*
  4621. * dp_txrx_path_stats() - Function to display dump stats
  4622. * @soc - soc handle
  4623. *
  4624. * return: none
  4625. */
  4626. static void dp_txrx_path_stats(struct dp_soc *soc)
  4627. {
  4628. uint8_t error_code;
  4629. uint8_t loop_pdev;
  4630. struct dp_pdev *pdev;
  4631. uint8_t i;
  4632. for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
  4633. pdev = soc->pdev_list[loop_pdev];
  4634. dp_aggregate_pdev_stats(pdev);
  4635. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4636. "Tx path Statistics:");
  4637. DP_TRACE(FATAL, "from stack: %u msdus (%u bytes)",
  4638. pdev->stats.tx_i.rcvd.num,
  4639. pdev->stats.tx_i.rcvd.bytes);
  4640. DP_TRACE(FATAL, "processed from host: %u msdus (%u bytes)",
  4641. pdev->stats.tx_i.processed.num,
  4642. pdev->stats.tx_i.processed.bytes);
  4643. DP_TRACE(FATAL, "successfully transmitted: %u msdus (%u bytes)",
  4644. pdev->stats.tx.tx_success.num,
  4645. pdev->stats.tx.tx_success.bytes);
  4646. DP_TRACE(FATAL, "Dropped in host:");
  4647. DP_TRACE(FATAL, "Total packets dropped: %u,",
  4648. pdev->stats.tx_i.dropped.dropped_pkt.num);
  4649. DP_TRACE(FATAL, "Descriptor not available: %u",
  4650. pdev->stats.tx_i.dropped.desc_na);
  4651. DP_TRACE(FATAL, "Ring full: %u",
  4652. pdev->stats.tx_i.dropped.ring_full);
  4653. DP_TRACE(FATAL, "Enqueue fail: %u",
  4654. pdev->stats.tx_i.dropped.enqueue_fail);
  4655. DP_TRACE(FATAL, "DMA Error: %u",
  4656. pdev->stats.tx_i.dropped.dma_error);
  4657. DP_TRACE(FATAL, "Dropped in hardware:");
  4658. DP_TRACE(FATAL, "total packets dropped: %u",
  4659. pdev->stats.tx.tx_failed);
  4660. DP_TRACE(FATAL, "mpdu age out: %u",
  4661. pdev->stats.tx.dropped.age_out);
  4662. DP_TRACE(FATAL, "firmware removed: %u",
  4663. pdev->stats.tx.dropped.fw_rem);
  4664. DP_TRACE(FATAL, "firmware removed tx: %u",
  4665. pdev->stats.tx.dropped.fw_rem_tx);
  4666. DP_TRACE(FATAL, "firmware removed notx %u",
  4667. pdev->stats.tx.dropped.fw_rem_notx);
  4668. DP_TRACE(FATAL, "peer_invalid: %u",
  4669. pdev->soc->stats.tx.tx_invalid_peer.num);
  4670. DP_TRACE(FATAL, "Tx packets sent per interrupt:");
  4671. DP_TRACE(FATAL, "Single Packet: %u",
  4672. pdev->stats.tx_comp_histogram.pkts_1);
  4673. DP_TRACE(FATAL, "2-20 Packets: %u",
  4674. pdev->stats.tx_comp_histogram.pkts_2_20);
  4675. DP_TRACE(FATAL, "21-40 Packets: %u",
  4676. pdev->stats.tx_comp_histogram.pkts_21_40);
  4677. DP_TRACE(FATAL, "41-60 Packets: %u",
  4678. pdev->stats.tx_comp_histogram.pkts_41_60);
  4679. DP_TRACE(FATAL, "61-80 Packets: %u",
  4680. pdev->stats.tx_comp_histogram.pkts_61_80);
  4681. DP_TRACE(FATAL, "81-100 Packets: %u",
  4682. pdev->stats.tx_comp_histogram.pkts_81_100);
  4683. DP_TRACE(FATAL, "101-200 Packets: %u",
  4684. pdev->stats.tx_comp_histogram.pkts_101_200);
  4685. DP_TRACE(FATAL, " 201+ Packets: %u",
  4686. pdev->stats.tx_comp_histogram.pkts_201_plus);
  4687. DP_TRACE(FATAL, "Rx path statistics");
  4688. DP_TRACE(FATAL, "delivered %u msdus ( %u bytes),",
  4689. pdev->stats.rx.to_stack.num,
  4690. pdev->stats.rx.to_stack.bytes);
  4691. for (i = 0; i < CDP_MAX_RX_RINGS; i++)
  4692. DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %u bytes),",
  4693. i, pdev->stats.rx.rcvd_reo[i].num,
  4694. pdev->stats.rx.rcvd_reo[i].bytes);
  4695. DP_TRACE(FATAL, "intra-bss packets %u msdus ( %u bytes),",
  4696. pdev->stats.rx.intra_bss.pkts.num,
  4697. pdev->stats.rx.intra_bss.pkts.bytes);
  4698. DP_TRACE(FATAL, "intra-bss fails %u msdus ( %u bytes),",
  4699. pdev->stats.rx.intra_bss.fail.num,
  4700. pdev->stats.rx.intra_bss.fail.bytes);
  4701. DP_TRACE(FATAL, "raw packets %u msdus ( %u bytes),",
  4702. pdev->stats.rx.raw.num,
  4703. pdev->stats.rx.raw.bytes);
  4704. DP_TRACE(FATAL, "dropped: error %u msdus",
  4705. pdev->stats.rx.err.mic_err);
  4706. DP_TRACE(FATAL, "peer invalid %u",
  4707. pdev->soc->stats.rx.err.rx_invalid_peer.num);
  4708. DP_TRACE(FATAL, "Reo Statistics");
  4709. DP_TRACE(FATAL, "rbm error: %u msdus",
  4710. pdev->soc->stats.rx.err.invalid_rbm);
  4711. DP_TRACE(FATAL, "hal ring access fail: %u msdus",
  4712. pdev->soc->stats.rx.err.hal_ring_access_fail);
  4713. DP_TRACE(FATAL, "Reo errors");
  4714. for (error_code = 0; error_code < HAL_REO_ERR_MAX;
  4715. error_code++) {
  4716. DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
  4717. error_code,
  4718. pdev->soc->stats.rx.err.reo_error[error_code]);
  4719. }
  4720. for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
  4721. error_code++) {
  4722. DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
  4723. error_code,
  4724. pdev->soc->stats.rx.err
  4725. .rxdma_error[error_code]);
  4726. }
  4727. DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
  4728. DP_TRACE(FATAL, "Single Packet: %u",
  4729. pdev->stats.rx_ind_histogram.pkts_1);
  4730. DP_TRACE(FATAL, "2-20 Packets: %u",
  4731. pdev->stats.rx_ind_histogram.pkts_2_20);
  4732. DP_TRACE(FATAL, "21-40 Packets: %u",
  4733. pdev->stats.rx_ind_histogram.pkts_21_40);
  4734. DP_TRACE(FATAL, "41-60 Packets: %u",
  4735. pdev->stats.rx_ind_histogram.pkts_41_60);
  4736. DP_TRACE(FATAL, "61-80 Packets: %u",
  4737. pdev->stats.rx_ind_histogram.pkts_61_80);
  4738. DP_TRACE(FATAL, "81-100 Packets: %u",
  4739. pdev->stats.rx_ind_histogram.pkts_81_100);
  4740. DP_TRACE(FATAL, "101-200 Packets: %u",
  4741. pdev->stats.rx_ind_histogram.pkts_101_200);
  4742. DP_TRACE(FATAL, " 201+ Packets: %u",
  4743. pdev->stats.rx_ind_histogram.pkts_201_plus);
  4744. DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
  4745. __func__,
  4746. pdev->soc->wlan_cfg_ctx->tso_enabled,
  4747. pdev->soc->wlan_cfg_ctx->lro_enabled,
  4748. pdev->soc->wlan_cfg_ctx->rx_hash,
  4749. pdev->soc->wlan_cfg_ctx->napi_enabled);
  4750. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  4751. DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
  4752. __func__,
  4753. pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
  4754. pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
  4755. #endif
  4756. }
  4757. }
  4758. /*
  4759. * dp_txrx_dump_stats() - Dump statistics
  4760. * @value - Statistics option
  4761. */
  4762. static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
  4763. enum qdf_stats_verbosity_level level)
  4764. {
  4765. struct dp_soc *soc =
  4766. (struct dp_soc *)psoc;
  4767. QDF_STATUS status = QDF_STATUS_SUCCESS;
  4768. if (!soc) {
  4769. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4770. "%s: soc is NULL", __func__);
  4771. return QDF_STATUS_E_INVAL;
  4772. }
  4773. switch (value) {
  4774. case CDP_TXRX_PATH_STATS:
  4775. dp_txrx_path_stats(soc);
  4776. break;
  4777. case CDP_RX_RING_STATS:
  4778. dp_print_per_ring_stats(soc);
  4779. break;
  4780. case CDP_TXRX_TSO_STATS:
  4781. /* TODO: NOT IMPLEMENTED */
  4782. break;
  4783. case CDP_DUMP_TX_FLOW_POOL_INFO:
  4784. cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
  4785. break;
  4786. case CDP_DP_NAPI_STATS:
  4787. dp_print_napi_stats(soc);
  4788. break;
  4789. case CDP_TXRX_DESC_STATS:
  4790. /* TODO: NOT IMPLEMENTED */
  4791. break;
  4792. default:
  4793. status = QDF_STATUS_E_INVAL;
  4794. break;
  4795. }
  4796. return status;
  4797. }
  4798. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  4799. /**
  4800. * dp_update_flow_control_parameters() - API to store datapath
  4801. * config parameters
  4802. * @soc: soc handle
4803. * @params: ini parameter handle
  4804. *
  4805. * Return: void
  4806. */
  4807. static inline
  4808. void dp_update_flow_control_parameters(struct dp_soc *soc,
  4809. struct cdp_config_params *params)
  4810. {
  4811. soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
  4812. params->tx_flow_stop_queue_threshold;
  4813. soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
  4814. params->tx_flow_start_queue_offset;
  4815. }
  4816. #else
  4817. static inline
  4818. void dp_update_flow_control_parameters(struct dp_soc *soc,
  4819. struct cdp_config_params *params)
  4820. {
  4821. }
  4822. #endif
  4823. /**
  4824. * dp_update_config_parameters() - API to store datapath
  4825. * config parameters
4826. * @psoc: soc handle
4827. * @params: ini parameter handle
  4828. *
  4829. * Return: status
  4830. */
  4831. static
  4832. QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
  4833. struct cdp_config_params *params)
  4834. {
  4835. struct dp_soc *soc = (struct dp_soc *)psoc;
  4836. if (!(soc)) {
  4837. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  4838. "%s: Invalid handle", __func__);
  4839. return QDF_STATUS_E_INVAL;
  4840. }
  4841. soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
  4842. soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
  4843. soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
  4844. soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
  4845. params->tcp_udp_checksumoffload;
  4846. soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
  4847. dp_update_flow_control_parameters(soc, params);
  4848. return QDF_STATUS_SUCCESS;
  4849. }
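/*
 * Illustrative usage (editorial sketch): a caller typically copies INI-driven
 * settings into a cdp_config_params block once at soc init and hands it to
 * this API. The values below are placeholders.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable              = 1;
 *	params.lro_enable              = 0;
 *	params.flow_steering_enable    = 1;
 *	params.tcp_udp_checksumoffload = 1;
 *	params.napi_enable             = 1;
 *
 *	(void)dp_update_config_parameters((struct cdp_soc *)soc, &params);
 */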
  4850. /**
4851. * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy for a vdev's
4852. * peer(s)
4853. * @vdev_handle - datapath vdev handle
4854. * @val: rx policy bitmap (WDS_POLICY_RX_UCAST_4ADDR, WDS_POLICY_RX_MCAST_4ADDR)
4855. *
4856. * Return: void
  4857. */
  4858. #ifdef WDS_VENDOR_EXTENSION
  4859. void
  4860. dp_txrx_set_wds_rx_policy(
  4861. struct cdp_vdev *vdev_handle,
  4862. u_int32_t val)
  4863. {
  4864. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4865. struct dp_peer *peer;
  4866. if (vdev->opmode == wlan_op_mode_ap) {
  4867. /* for ap, set it on bss_peer */
  4868. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  4869. if (peer->bss_peer) {
  4870. peer->wds_ecm.wds_rx_filter = 1;
  4871. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  4872. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  4873. break;
  4874. }
  4875. }
  4876. } else if (vdev->opmode == wlan_op_mode_sta) {
  4877. peer = TAILQ_FIRST(&vdev->peer_list);
  4878. peer->wds_ecm.wds_rx_filter = 1;
  4879. peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
  4880. peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
  4881. }
  4882. }
  4883. /**
  4884. * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
  4885. *
  4886. * @peer_handle - datapath peer handle
  4887. * @wds_tx_ucast: policy for unicast transmission
  4888. * @wds_tx_mcast: policy for multicast transmission
  4889. *
  4890. * Return: void
  4891. */
  4892. void
  4893. dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
  4894. int wds_tx_ucast, int wds_tx_mcast)
  4895. {
  4896. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  4897. if (wds_tx_ucast || wds_tx_mcast) {
  4898. peer->wds_enabled = 1;
  4899. peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
  4900. peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
  4901. } else {
  4902. peer->wds_enabled = 0;
  4903. peer->wds_ecm.wds_tx_ucast_4addr = 0;
  4904. peer->wds_ecm.wds_tx_mcast_4addr = 0;
  4905. }
  4906. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  4907. FL("Policy Update set to :\
  4908. peer->wds_enabled %d\
  4909. peer->wds_ecm.wds_tx_ucast_4addr %d\
  4910. peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
  4911. peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
  4912. peer->wds_ecm.wds_tx_mcast_4addr);
  4913. return;
  4914. }
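/*
 * Illustrative call (editorial sketch): non-zero arguments select 4-address
 * (WDS) framing for the corresponding traffic type and mark the peer WDS
 * capable; 0/0 clears the policy, as shown above. The peer handle name is
 * hypothetical.
 *
 *	dp_txrx_peer_wds_tx_policy_update(peer_handle, 1, 1); // 4-addr ucast+mcast
 *	dp_txrx_peer_wds_tx_policy_update(peer_handle, 0, 0); // back to 3-addr
 */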
  4915. #endif
  4916. static struct cdp_wds_ops dp_ops_wds = {
  4917. .vdev_set_wds = dp_vdev_set_wds,
  4918. #ifdef WDS_VENDOR_EXTENSION
  4919. .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
  4920. .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
  4921. #endif
  4922. };
  4923. /*
  4924. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  4925. * @soc - datapath soc handle
  4926. * @peer - datapath peer handle
  4927. *
  4928. * Delete the AST entries belonging to a peer
  4929. */
  4930. #ifdef FEATURE_WDS
  4931. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  4932. struct dp_peer *peer)
  4933. {
  4934. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  4935. qdf_spin_lock_bh(&soc->ast_lock);
  4936. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
  4937. if (ast_entry->next_hop) {
  4938. soc->cdp_soc.ol_ops->peer_del_wds_entry(
  4939. peer->vdev->osif_vdev,
  4940. ast_entry->mac_addr.raw);
  4941. }
  4942. dp_peer_del_ast(soc, ast_entry);
  4943. }
  4944. qdf_spin_unlock_bh(&soc->ast_lock);
  4945. }
  4946. #else
  4947. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  4948. struct dp_peer *peer)
  4949. {
  4950. }
  4951. #endif
  4952. /*
  4953. * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
  4954. * @vdev_handle - datapath vdev handle
  4955. * @callback - callback function
  4956. * @ctxt: callback context
  4957. *
  4958. */
  4959. static void
  4960. dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
  4961. ol_txrx_data_tx_cb callback, void *ctxt)
  4962. {
  4963. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  4964. vdev->tx_non_std_data_callback.func = callback;
  4965. vdev->tx_non_std_data_callback.ctxt = ctxt;
  4966. }
  4967. #ifdef CONFIG_WIN
  4968. static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  4969. {
  4970. struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
  4971. struct dp_peer *peer = (struct dp_peer *) peer_hdl;
  4972. struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
  4973. dp_peer_delete_ast_entries(soc, peer);
  4974. }
  4975. #endif
  4976. static struct cdp_cmn_ops dp_ops_cmn = {
  4977. .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
  4978. .txrx_vdev_attach = dp_vdev_attach_wifi3,
  4979. .txrx_vdev_detach = dp_vdev_detach_wifi3,
  4980. .txrx_pdev_attach = dp_pdev_attach_wifi3,
  4981. .txrx_pdev_detach = dp_pdev_detach_wifi3,
  4982. .txrx_peer_create = dp_peer_create_wifi3,
  4983. .txrx_peer_setup = dp_peer_setup_wifi3,
  4984. #ifdef CONFIG_WIN
  4985. .txrx_peer_teardown = dp_peer_teardown_wifi3,
  4986. #else
  4987. .txrx_peer_teardown = NULL,
  4988. #endif
  4989. .txrx_peer_delete = dp_peer_delete_wifi3,
  4990. .txrx_vdev_register = dp_vdev_register_wifi3,
  4991. .txrx_soc_detach = dp_soc_detach_wifi3,
  4992. .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
  4993. .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
  4994. .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
  4995. .addba_requestprocess = dp_addba_requestprocess_wifi3,
  4996. .addba_responsesetup = dp_addba_responsesetup_wifi3,
  4997. .delba_process = dp_delba_process_wifi3,
  4998. .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
  4999. .flush_cache_rx_queue = NULL,
  5000. /* TODO: get API's for dscp-tid need to be added*/
  5001. .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
  5002. .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
  5003. .txrx_stats = dp_txrx_stats,
  5004. .txrx_stats_request = dp_txrx_stats_request,
  5005. .txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
  5006. .display_stats = dp_txrx_dump_stats,
  5007. .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
  5008. .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
  5009. #ifdef DP_INTR_POLL_BASED
  5010. .txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
  5011. #else
  5012. .txrx_intr_attach = dp_soc_interrupt_attach,
  5013. #endif
  5014. .txrx_intr_detach = dp_soc_interrupt_detach,
  5015. .set_pn_check = dp_set_pn_check_wifi3,
  5016. .update_config_parameters = dp_update_config_parameters,
  5017. /* TODO: Add other functions */
  5018. .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set
  5019. };
  5020. static struct cdp_ctrl_ops dp_ops_ctrl = {
  5021. .txrx_peer_authorize = dp_peer_authorize,
  5022. .txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
  5023. .txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
  5024. #ifdef MESH_MODE_SUPPORT
  5025. .txrx_set_mesh_mode = dp_peer_set_mesh_mode,
  5026. .txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
  5027. #endif
  5028. .txrx_set_vdev_param = dp_set_vdev_param,
  5029. .txrx_peer_set_nawds = dp_peer_set_nawds,
  5030. .txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
  5031. .txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
  5032. .txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
  5033. .txrx_update_filter_neighbour_peers =
  5034. dp_update_filter_neighbour_peers,
  5035. .txrx_get_sec_type = dp_get_sec_type,
  5036. /* TODO: Add other functions */
  5037. .txrx_wdi_event_sub = dp_wdi_event_sub,
  5038. .txrx_wdi_event_unsub = dp_wdi_event_unsub,
  5039. #ifdef WDI_EVENT_ENABLE
  5040. .txrx_get_pldev = dp_get_pldev,
  5041. #endif
  5042. .txrx_set_pdev_param = dp_set_pdev_param,
  5043. };
  5044. static struct cdp_me_ops dp_ops_me = {
  5045. #ifdef ATH_SUPPORT_IQUE
  5046. .tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
  5047. .tx_me_free_descriptor = dp_tx_me_free_descriptor,
  5048. .tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
  5049. #endif
  5050. };
  5051. static struct cdp_mon_ops dp_ops_mon = {
  5052. .txrx_monitor_set_filter_ucast_data = NULL,
  5053. .txrx_monitor_set_filter_mcast_data = NULL,
  5054. .txrx_monitor_set_filter_non_data = NULL,
  5055. .txrx_monitor_get_filter_ucast_data = NULL,
  5056. .txrx_monitor_get_filter_mcast_data = NULL,
  5057. .txrx_monitor_get_filter_non_data = NULL,
  5058. .txrx_reset_monitor_mode = dp_reset_monitor_mode,
  5059. /* Added support for HK advance filter */
  5060. .txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
  5061. };
  5062. static struct cdp_host_stats_ops dp_ops_host_stats = {
  5063. .txrx_per_peer_stats = dp_get_host_peer_stats,
  5064. .get_fw_peer_stats = dp_get_fw_peer_stats,
  5065. .get_htt_stats = dp_get_htt_stats,
  5066. .txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
  5067. .txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
  5068. /* TODO */
  5069. };
  5070. static struct cdp_raw_ops dp_ops_raw = {
  5071. /* TODO */
  5072. };
  5073. #ifdef CONFIG_WIN
  5074. static struct cdp_pflow_ops dp_ops_pflow = {
  5075. /* TODO */
  5076. };
  5077. #endif /* CONFIG_WIN */
  5078. #ifdef FEATURE_RUNTIME_PM
  5079. /**
  5080. * dp_runtime_suspend() - ensure DP is ready to runtime suspend
  5081. * @opaque_pdev: DP pdev context
  5082. *
  5083. * DP is ready to runtime suspend if there are no pending TX packets.
  5084. *
  5085. * Return: QDF_STATUS
  5086. */
  5087. static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
  5088. {
  5089. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  5090. struct dp_soc *soc = pdev->soc;
5091. /* Call DP TX flow control API to check if there are any
  5092. pending packets */
  5093. if (soc->intr_mode == DP_INTR_POLL)
  5094. qdf_timer_stop(&soc->int_timer);
  5095. return QDF_STATUS_SUCCESS;
  5096. }
  5097. /**
  5098. * dp_runtime_resume() - ensure DP is ready to runtime resume
  5099. * @opaque_pdev: DP pdev context
  5100. *
  5101. * Resume DP for runtime PM.
  5102. *
  5103. * Return: QDF_STATUS
  5104. */
  5105. static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
  5106. {
  5107. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  5108. struct dp_soc *soc = pdev->soc;
  5109. void *hal_srng;
  5110. int i;
  5111. if (soc->intr_mode == DP_INTR_POLL)
  5112. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  5113. for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
  5114. hal_srng = soc->tcl_data_ring[i].hal_srng;
  5115. if (hal_srng) {
  5116. /* We actually only need to acquire the lock */
  5117. hal_srng_access_start(soc->hal_soc, hal_srng);
  5118. /* Update SRC ring head pointer for HW to send
  5119. all pending packets */
  5120. hal_srng_access_end(soc->hal_soc, hal_srng);
  5121. }
  5122. }
  5123. return QDF_STATUS_SUCCESS;
  5124. }
  5125. #endif /* FEATURE_RUNTIME_PM */
  5126. static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
  5127. {
  5128. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  5129. struct dp_soc *soc = pdev->soc;
  5130. if (soc->intr_mode == DP_INTR_POLL)
  5131. qdf_timer_stop(&soc->int_timer);
  5132. return QDF_STATUS_SUCCESS;
  5133. }
  5134. static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
  5135. {
  5136. struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
  5137. struct dp_soc *soc = pdev->soc;
  5138. if (soc->intr_mode == DP_INTR_POLL)
  5139. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  5140. return QDF_STATUS_SUCCESS;
  5141. }
  5142. #ifndef CONFIG_WIN
  5143. static struct cdp_misc_ops dp_ops_misc = {
  5144. .tx_non_std = dp_tx_non_std,
  5145. .get_opmode = dp_get_opmode,
  5146. #ifdef FEATURE_RUNTIME_PM
  5147. .runtime_suspend = dp_runtime_suspend,
  5148. .runtime_resume = dp_runtime_resume,
  5149. #endif /* FEATURE_RUNTIME_PM */
  5150. .pkt_log_init = dp_pkt_log_init,
  5151. .pkt_log_con_service = dp_pkt_log_con_service,
  5152. };
  5153. static struct cdp_flowctl_ops dp_ops_flowctl = {
  5154. /* WIFI 3.0 DP implement as required. */
  5155. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  5156. .register_pause_cb = dp_txrx_register_pause_cb,
  5157. .dump_flow_pool_info = dp_tx_dump_flow_pool_info,
  5158. #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
  5159. };
  5160. static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
  5161. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  5162. };
  5163. #ifdef IPA_OFFLOAD
  5164. static struct cdp_ipa_ops dp_ops_ipa = {
  5165. .ipa_get_resource = dp_ipa_get_resource,
  5166. .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
  5167. .ipa_op_response = dp_ipa_op_response,
  5168. .ipa_register_op_cb = dp_ipa_register_op_cb,
  5169. .ipa_get_stat = dp_ipa_get_stat,
  5170. .ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
  5171. .ipa_enable_autonomy = dp_ipa_enable_autonomy,
  5172. .ipa_disable_autonomy = dp_ipa_disable_autonomy,
  5173. .ipa_setup = dp_ipa_setup,
  5174. .ipa_cleanup = dp_ipa_cleanup,
  5175. .ipa_setup_iface = dp_ipa_setup_iface,
  5176. .ipa_cleanup_iface = dp_ipa_cleanup_iface,
  5177. .ipa_enable_pipes = dp_ipa_enable_pipes,
  5178. .ipa_disable_pipes = dp_ipa_disable_pipes,
  5179. .ipa_set_perf_level = dp_ipa_set_perf_level
  5180. };
  5181. #endif
  5182. static struct cdp_bus_ops dp_ops_bus = {
  5183. .bus_suspend = dp_bus_suspend,
  5184. .bus_resume = dp_bus_resume
  5185. };
  5186. static struct cdp_ocb_ops dp_ops_ocb = {
  5187. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  5188. };
  5189. static struct cdp_throttle_ops dp_ops_throttle = {
  5190. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  5191. };
  5192. static struct cdp_mob_stats_ops dp_ops_mob_stats = {
  5193. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  5194. };
  5195. static struct cdp_cfg_ops dp_ops_cfg = {
  5196. /* WIFI 3.0 DP NOT IMPLEMENTED YET */
  5197. };
  5198. /*
5199. * dp_wrapper_peer_get_ref_by_addr - wrapper function to look up a peer by address
  5200. * @dev: physical device instance
  5201. * @peer_mac_addr: peer mac address
  5202. * @local_id: local id for the peer
  5203. * @debug_id: to track enum peer access
  5204. * Return: peer instance pointer
  5205. */
  5206. static inline void *
  5207. dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
  5208. u8 *local_id,
  5209. enum peer_debug_id_type debug_id)
  5210. {
  5211. /*
  5212. * Currently this function does not implement the "get ref"
  5213. * functionality and is mapped to dp_find_peer_by_addr which does not
  5214. * increment the peer ref count. So the peer state is uncertain after
  5215. * calling this API. The functionality needs to be implemented.
  5216. * Accordingly the corresponding release_ref function is NULL.
  5217. */
  5218. return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
  5219. }
  5220. static struct cdp_peer_ops dp_ops_peer = {
  5221. .register_peer = dp_register_peer,
  5222. .clear_peer = dp_clear_peer,
  5223. .find_peer_by_addr = dp_find_peer_by_addr,
  5224. .find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
  5225. .peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
  5226. .peer_release_ref = NULL,
  5227. .local_peer_id = dp_local_peer_id,
  5228. .peer_find_by_local_id = dp_peer_find_by_local_id,
  5229. .peer_state_update = dp_peer_state_update,
  5230. .get_vdevid = dp_get_vdevid,
  5231. .get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
  5232. .peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
  5233. .get_vdev_for_peer = dp_get_vdev_for_peer,
  5234. .get_peer_state = dp_get_peer_state,
  5235. .last_assoc_received = dp_get_last_assoc_received,
  5236. .last_disassoc_received = dp_get_last_disassoc_received,
  5237. .last_deauth_received = dp_get_last_deauth_received,
  5238. };
  5239. #endif
  5240. static struct cdp_ops dp_txrx_ops = {
  5241. .cmn_drv_ops = &dp_ops_cmn,
  5242. .ctrl_ops = &dp_ops_ctrl,
  5243. .me_ops = &dp_ops_me,
  5244. .mon_ops = &dp_ops_mon,
  5245. .host_stats_ops = &dp_ops_host_stats,
  5246. .wds_ops = &dp_ops_wds,
  5247. .raw_ops = &dp_ops_raw,
  5248. #ifdef CONFIG_WIN
  5249. .pflow_ops = &dp_ops_pflow,
  5250. #endif /* CONFIG_WIN */
  5251. #ifndef CONFIG_WIN
  5252. .misc_ops = &dp_ops_misc,
  5253. .cfg_ops = &dp_ops_cfg,
  5254. .flowctl_ops = &dp_ops_flowctl,
  5255. .l_flowctl_ops = &dp_ops_l_flowctl,
  5256. #ifdef IPA_OFFLOAD
  5257. .ipa_ops = &dp_ops_ipa,
  5258. #endif
  5259. .bus_ops = &dp_ops_bus,
  5260. .ocb_ops = &dp_ops_ocb,
  5261. .peer_ops = &dp_ops_peer,
  5262. .throttle_ops = &dp_ops_throttle,
  5263. .mob_stats_ops = &dp_ops_mob_stats,
  5264. #endif
  5265. };
  5266. /*
5267. * dp_soc_set_txrx_ring_map() - Set the default tx ring map for the soc
5268. * @soc: DP soc handle
  5269. *
  5270. * Return: Void
  5271. */
  5272. static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
  5273. {
  5274. uint32_t i;
  5275. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  5276. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
  5277. }
  5278. }
/*
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @osif_soc: Opaque SOC handle from OSIF/HDD
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload ops provided by the legacy (OL) layer
 * @psoc: Object manager psoc handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
/*
 * Local prototype added to temporarily address a warning caused by
 * -Wmissing-prototypes. A more correct solution, namely exposing the
 * prototype in an appropriate header file, will come later.
 */
void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, struct wlan_objmgr_psoc *psoc);
void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, struct wlan_objmgr_psoc *psoc)
{
	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP SOC memory allocation failed"));
		goto fail0;
	}

	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->osif_soc = osif_soc;
	soc->osdev = qdf_osdev;
	soc->hif_handle = hif_handle;
	soc->psoc = psoc;

	soc->hal_soc = hif_get_hal_handle(hif_handle);
	soc->htt_handle = htt_soc_attach(soc, osif_soc, htc_handle,
					 soc->hal_soc, qdf_osdev);
	if (!soc->htt_handle) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HTT attach failed"));
		goto fail1;
	}

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
	if (!soc->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("wlan_cfg_soc_attach failed"));
		goto fail2;
	}
	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash);
	soc->cce_disable = false;

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->osif_soc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL)
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->osif_soc,
				CDP_CFG_CCE_DISABLE);
		if (ret)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map */
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return (void *)soc;

fail2:
	htt_soc_detach(soc->htt_handle);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
/*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to the MAC
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return soc->pdev_list[mac_id];

	/* Typically for MCL, as there is only one pdev */
	return soc->pdev_list[0];
}
/*
 * dp_get_ring_id_for_mac_id() - Return the ring index for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: ring id
 */
int dp_get_ring_id_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	/*
	 * A single pdev using both MACs will operate on both MAC rings,
	 * which is the case for MCL.
	 */
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return mac_id;

	/* For WIN each PDEV will operate one ring, so index is zero. */
	return 0;
}
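
/*
 * Illustrative usage only: a hypothetical helper (not part of the driver)
 * showing how the two lookups above are typically combined when a caller
 * iterates over mac_id values and needs both the owning pdev and the ring
 * index to program. The helper name is an assumption.
 */
static inline struct dp_pdev *
dp_get_pdev_and_ring_for_mac_id(struct dp_soc *soc, uint32_t mac_id,
				int *ring_id)
{
	/* Ring index collapses to 0 on WIN (per-pdev LMAC rings) and stays
	 * equal to mac_id on MCL (single pdev serving both MAC rings).
	 */
	*ring_id = dp_get_ring_id_for_mac_id(soc, mac_id);

	return (struct dp_pdev *)dp_get_pdev_for_mac_id(soc, mac_id);
}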
/*
 * dp_is_hw_dbs_enable() - Check if DBS is supported and clamp the number
 *			   of MAC rings accordingly
 * @soc: DP SoC context
 * @max_mac_rings: Number of MAC rings, updated in place
 *
 * Return: None
 */
static
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings)
{
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable(soc->psoc);

	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
}
/*
 * dp_set_pktlog_wifi3() - subscribe to or unsubscribe from a pktlog
 *			   WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not (true or false)
 *
 * Return: 0 on success
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = pdev->soc;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d\n"),
		  max_mac_rings);
	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					htt_h2t_rx_ring_cfg(soc->htt_handle,
						pdev->pdev_id + mac_id,
						pdev->rxdma_mon_status_ring
							.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					htt_h2t_rx_ring_cfg(soc->htt_handle,
						pdev->pdev_id + mac_id,
						pdev->rxdma_mon_status_ring
							.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE_PKTLOG_LITE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* Pass 0xffff to enable
			 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW. Proper
			 * macros will be used once they are defined in the
			 * HTT header file (see the illustrative defines
			 * after this function).
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				dp_h2t_cfg_stats_msg_send(pdev, 0xffff,
						pdev->pdev_id + mac_id);
			}
			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					htt_h2t_rx_ring_cfg(soc->htt_handle,
						pdev->pdev_id + mac_id,
						pdev->rxdma_mon_status_ring
							.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* Pass 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
			 * in FW. Proper macros will be used once they are
			 * defined in the HTT header file.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				dp_h2t_cfg_stats_msg_send(pdev, 0,
						pdev->pdev_id + mac_id);
			}
			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
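
/*
 * Illustrative only: hypothetical local names for the magic values passed
 * to dp_h2t_cfg_stats_msg_send() above, pending proper macros in the HTT
 * header file. These defines are assumptions and are not used by the
 * driver code in this file.
 */
#define DP_PPDU_STATS_CFG_ENABLE_ALL	0xffff	/* enable all PPDU stats TLVs */
#define DP_PPDU_STATS_CFG_DISABLE	0x0	/* disable PPDU stats reporting */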
#endif
#ifdef CONFIG_MCL
/*
 * dp_service_mon_rings() - timer handler to reap monitor rings, required
 * as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return: None
 */
static void dp_service_mon_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, work_done;

	work_done = dp_mon_process(soc, ring, QCA_NAPI_BUDGET);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("Reaped %d descs from Monitor rings"), work_done);

	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
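
/*
 * Illustrative sketch only: roughly how the reap timer serviced above is
 * expected to be set up, using the QDF timer API. The helper name is an
 * assumption; the real initialization lives elsewhere in this file and is
 * not shown in this section.
 */
static inline void dp_mon_reap_timer_init_sketch(struct dp_soc *soc)
{
	/* Periodic timer that calls dp_service_mon_rings() with the SoC as
	 * its argument; dp_service_mon_rings() re-arms it on every run.
	 * The timer is armed later via qdf_timer_mod(), e.g. when pktlog is
	 * enabled in dp_set_pktlog_wifi3() above.
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
		       dp_service_mon_rings, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
}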
#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @ppdev: physical device handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
	struct dp_pdev *handle = (struct dp_pdev *)ppdev;

	if (handle->pkt_log_init) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Packet log already initialized", __func__);
		return;
	}

	pktlog_sethandle(&handle->pl_dev, scn);
	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		handle->pkt_log_init = false;
	} else {
		handle->pkt_log_init = true;
	}
}
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
	pktlog_htc_attach();
}
/**
 * dp_pktlogmod_exit() - API to clean up pktlog info
 * @handle: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
	void *scn = (void *)handle->soc->hif_handle;

	if (!scn) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid hif(scn) handle", __func__);
		return;
	}

	pktlogmod_exit(scn);
	handle->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
#endif