wmi.c 255 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183
  1. // SPDX-License-Identifier: BSD-3-Clause-Clear
  2. /*
  3. * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/skbuff.h>
  7. #include <linux/ctype.h>
  8. #include <net/mac80211.h>
  9. #include <net/cfg80211.h>
  10. #include <linux/completion.h>
  11. #include <linux/if_ether.h>
  12. #include <linux/types.h>
  13. #include <linux/pci.h>
  14. #include <linux/uuid.h>
  15. #include <linux/time.h>
  16. #include <linux/of.h>
  17. #include "core.h"
  18. #include "debug.h"
  19. #include "mac.h"
  20. #include "hw.h"
  21. #include "peer.h"
/* Per-tag validation policy applied while walking a WMI TLV stream */
struct wmi_tlv_policy {
	size_t min_len;	/* minimum acceptable payload length for the tag */
};
/* Parse state carried through the WMI service-ready event TLV walk */
struct wmi_tlv_svc_ready_parse {
	bool wmi_svc_bitmap_done;	/* first service-bitmap array already consumed */
};
/* Accumulates DMA ring capability entries seen during TLV parsing */
struct wmi_tlv_dma_ring_caps_parse {
	struct wmi_dma_ring_capabilities *dma_ring_caps;	/* first entry of the array TLV */
	u32 n_dma_ring_caps;					/* number of entries counted */
};
/* Parse state for the multi-TLV WMI service-ready-ext event.
 * Pointers reference TLV payloads inside the event skb; the bool
 * flags record which variable-length sections have been consumed
 * so repeated array TLVs are attributed to the right section.
 */
struct wmi_tlv_svc_rdy_ext_parse {
	struct ath11k_service_ext_param param;
	struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
	struct wmi_hw_mode_capabilities *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct wmi_hw_mode_capabilities pref_hw_mode_caps;	/* highest-priority mode found */
	struct wmi_mac_phy_capabilities *mac_phy_caps;
	u32 n_mac_phy_caps;
	struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
	struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};
/* Parse state for the WMI service-ready-ext2 event */
struct wmi_tlv_svc_rdy_ext2_parse {
	struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;	/* DMA ring capability section already consumed */
};
/* Parse state for the WMI ready event */
struct wmi_tlv_rdy_parse {
	u32 num_extra_mac_addr;	/* extra MAC address entries seen in the event */
};
/* Parse state for the DMA buffer release event: fixed header plus
 * variable-length buffer-entry and meta-data arrays.
 */
struct wmi_tlv_dma_buf_release_parse {
	struct ath11k_wmi_dma_buf_release_fixed_param fixed;
	struct wmi_dma_buf_release_entry *buf_entry;
	struct wmi_dma_buf_release_meta_data *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;	/* buffer-entry array already consumed */
	bool meta_data_done;	/* meta-data array already consumed */
};
/* Parse state for firmware stats events */
struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;		/* fixed stats header TLV */
	const struct wmi_per_chain_rssi_stats *rssi;	/* optional per-chain RSSI TLV */
	struct ath11k_fw_stats *stats;			/* output accumulator */
	int rssi_num;
	bool chain_rssi_done;	/* chain-RSSI array already consumed */
};
/* Minimum-length table indexed by TLV tag; ath11k_wmi_tlv_iter()
 * rejects any TLV shorter than its entry here. Tags absent from the
 * table (min_len == 0) are accepted at any length.
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT]
		= { .min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
		= { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT]
		= { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
	[WMI_TAG_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT]
		= { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct wmi_ready_event_min) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT]
		= {.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT]
		= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_STATS_EVENT]
		= { .min_len = sizeof(struct wmi_stats_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
		.min_len = sizeof(struct wmi_obss_color_collision_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_ev) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
	[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
/* Map each HW mode enum value to its selection priority constant
 * (WMI_HOST_HW_MODE_*_PRI); lower priority value wins when choosing
 * the preferred HW mode from the service-ready-ext capabilities.
 */
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath11k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),

	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
/* Walk a WMI TLV stream, invoking @iter for each element.
 *
 * Each element is a struct wmi_tlv header (tag + payload length)
 * followed by the payload. The walk validates that every header and
 * payload fits inside @len and that the payload meets the per-tag
 * minimum from wmi_tlv_policies[] before handing it to @iter.
 *
 * Returns 0 on a fully-consumed stream, -EINVAL on a malformed
 * stream, or the first non-zero value returned by @iter.
 */
static int
ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;	/* kept only for error-report offsets */
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* Must have room for a complete TLV header */
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* Declared payload must not overrun the buffer */
		if (tlv_len > len) {
			ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		/* Enforce the per-tag minimum length, if one is declared */
		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
  194. static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
  195. const void *ptr, void *data)
  196. {
  197. const void **tb = data;
  198. if (tag < WMI_TAG_MAX)
  199. tb[tag] = ptr;
  200. return 0;
  201. }
/* Scatter all TLVs in @ptr/@len into the caller-provided tag-indexed
 * table @tb (which must hold WMI_TAG_MAX entries).
 */
static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,
				   (void *)tb);
}
  208. static const void **
  209. ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
  210. size_t len, gfp_t gfp)
  211. {
  212. const void **tb;
  213. int ret;
  214. tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
  215. if (!tb)
  216. return ERR_PTR(-ENOMEM);
  217. ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
  218. if (ret) {
  219. kfree(tb);
  220. return ERR_PTR(ret);
  221. }
  222. return tb;
  223. }
/* Prepend the WMI command header to @skb and hand it to HTC without
 * waiting for credits. On HTC failure the header is popped again so
 * the caller may retry with the same skb.
 */
static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
	struct ath11k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = cmd;

	trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);

	/* Clear the cb area before HTC takes ownership of the skb */
	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}
/* Send a WMI command, sleeping until resources are available.
 *
 * Depending on hw_params.credit_flow the send is retried whenever
 * either HTC TX credits or CE descriptors are replenished: the
 * wait_event_timeout() condition is a statement expression that
 * re-attempts the send on each wakeup and completes once the result
 * is anything other than the respective "resource exhausted" code
 * (-EAGAIN for credits, -ENOBUFS for CE descriptors). A crash flush
 * aborts the wait with -ESHUTDOWN.
 *
 * Returns 0 on success or a negative error; -EAGAIN/-ENOBUFS here
 * mean the WMI_SEND_TIMEOUT_HZ timeout expired.
 */
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;
	struct ath11k_base *ab = wmi_sc->ab;

	might_sleep();

	if (ab->hw_params.credit_flow) {
		wait_event_timeout(wmi_sc->tx_credits_wq, ({
			ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

			if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
					    &wmi_sc->ab->dev_flags))
				ret = -ESHUTDOWN;

			(ret != -EAGAIN);
		}), WMI_SEND_TIMEOUT_HZ);
	} else {
		wait_event_timeout(wmi->tx_ce_desc_wq, ({
			ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

			if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
					    &wmi_sc->ab->dev_flags))
				ret = -ESHUTDOWN;

			(ret != -ENOBUFS);
		}), WMI_SEND_TIMEOUT_HZ);
	}

	if (ret == -EAGAIN)
		ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);

	if (ret == -ENOBUFS)
		ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n",
			    cmd_id);

	return ret;
}
  278. static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
  279. const void *ptr,
  280. struct ath11k_service_ext_param *param)
  281. {
  282. const struct wmi_service_ready_ext_event *ev = ptr;
  283. if (!ev)
  284. return -EINVAL;
  285. /* Move this to host based bitmap */
  286. param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
  287. param->default_fw_config_bits = ev->default_fw_config_bits;
  288. param->he_cap_info = ev->he_cap_info;
  289. param->mpdu_density = ev->mpdu_density;
  290. param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
  291. memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
  292. return 0;
  293. }
/* Locate the MAC/PHY capability entry for (@hw_mode_id, @phy_id) in
 * the service-ready-ext capability arrays and populate @pdev's caps.
 *
 * The per-mode PHY entries are packed consecutively in
 * @wmi_mac_phy_caps, so the entry index is the sum of PHY counts of
 * all HW modes preceding @hw_mode_id (derived from each mode's
 * phy_id_map popcount) plus @phy_id.
 *
 * Also appends this pdev's id/bands to ab->target_pdev_ids.
 * NOTE(review): target_pdev_count is incremented without a bounds
 * check against the target_pdev_ids array — assumed bounded by the
 * firmware-reported PHY count; confirm against callers.
 *
 * Returns 0 on success, -EINVAL if the mode/phy is not found or the
 * entry advertises neither 2 GHz nor 5 GHz support.
 */
static int
ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
				      struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
				      struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
				      struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
				      struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath11k_pdev *pdev)
{
	struct wmi_mac_phy_capabilities *mac_phy_caps;
	struct ath11k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath11k_band_cap *cap_band;
	struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;

	if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
		return -EINVAL;

	/* Accumulate the PHY count of every mode before the target one */
	for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
		if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
			break;

		phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
		while (phy_map) {
			phy_map >>= 1;
			phy_idx++;
		}
	}

	if (hw_idx == hw_caps->num_hw_modes)
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= hal_reg_caps->num_phy)
		return -EINVAL;

	mac_phy_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = mac_phy_caps->pdev_id;
	pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
	pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
	ab->target_pdev_ids[ab->target_pdev_count].supported_bands =
		mac_phy_caps->supported_bands;
	ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
	ab->target_pdev_count++;

	if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
	    !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
		return -EINVAL;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
	}

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
		pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
		pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
		pdev_cap->nss_ratio_enabled =
			WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
		pdev_cap->nss_ratio_info =
			WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
		       sizeof(struct ath11k_ppe_threshold));
	}

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		       sizeof(struct ath11k_ppe_threshold));

		/* 6 GHz reuses the 5 GHz capability fields from firmware */
		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		       sizeof(struct ath11k_ppe_threshold));
	}

	return 0;
}
  405. static int
  406. ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
  407. struct wmi_soc_hal_reg_capabilities *reg_caps,
  408. struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
  409. u8 phy_idx,
  410. struct ath11k_hal_reg_capabilities_ext *param)
  411. {
  412. struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
  413. if (!reg_caps || !wmi_ext_reg_cap)
  414. return -EINVAL;
  415. if (phy_idx >= reg_caps->num_phy)
  416. return -EINVAL;
  417. ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
  418. param->phy_id = ext_reg_cap->phy_id;
  419. param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
  420. param->eeprom_reg_domain_ext =
  421. ext_reg_cap->eeprom_reg_domain_ext;
  422. param->regcap1 = ext_reg_cap->regcap1;
  423. param->regcap2 = ext_reg_cap->regcap2;
  424. /* check if param->wireless_mode is needed */
  425. param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
  426. param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
  427. param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
  428. param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
  429. return 0;
  430. }
/* Copy the fixed service-ready event fields into the target
 * capability structure @cap.
 */
static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
					 const void *evt_buf,
					 struct ath11k_targ_cap *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath11k_err(ab, "%s: failed by NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = ev->phy_capability;
	cap->max_frag_entry = ev->max_frag_entry;
	cap->num_rf_chains = ev->num_rf_chains;
	cap->ht_cap_info = ev->ht_cap_info;
	cap->vht_cap_info = ev->vht_cap_info;
	cap->vht_supp_mcs = ev->vht_supp_mcs;
	cap->hw_min_tx_power = ev->hw_min_tx_power;
	cap->hw_max_tx_power = ev->hw_max_tx_power;
	cap->sys_cap_info = ev->sys_cap_info;
	cap->min_pkt_size_enable = ev->min_pkt_size_enable;
	cap->max_bcn_ie_size = ev->max_bcn_ie_size;
	cap->max_num_scan_channels = ev->max_num_scan_channels;
	cap->max_supported_macs = ev->max_supported_macs;
	cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
	cap->txrx_chainmask = ev->txrx_chainmask;
	cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
	cap->num_msdu_desc = ev->num_msdu_desc;

	return 0;
}
  460. /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
  461. * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
  462. * 4-byte word.
  463. */
  464. static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
  465. const u32 *wmi_svc_bm)
  466. {
  467. int i, j;
  468. for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
  469. do {
  470. if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
  471. set_bit(j, wmi->wmi_ab->svc_map);
  472. } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
  473. }
  474. }
/* TLV iterator callback for the service-ready event: captures the
 * fixed event TLV into ab->target_caps and the first uint32 array
 * (the service bitmap) into the per-device service map; subsequent
 * uint32 arrays and unknown tags are ignored.
 */
static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_svc_ready_parse *svc_ready = data;
	struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			/* Bitmap must carry all WMI_SERVICE_BM_SIZE words */
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}
  503. static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
  504. {
  505. struct wmi_tlv_svc_ready_parse svc_ready = { };
  506. int ret;
  507. ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
  508. ath11k_wmi_tlv_svc_rdy_parse,
  509. &svc_ready);
  510. if (ret) {
  511. ath11k_warn(ab, "failed to parse tlv %d\n", ret);
  512. return ret;
  513. }
  514. return 0;
  515. }
/* Allocate a zeroed skb suitable for a WMI command: @len is rounded
 * up to 4 bytes and headroom is reserved for the HTC/WMI headers.
 * Returns NULL on allocation failure.
 */
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
{
	struct sk_buff *skb;
	struct ath11k_base *ab = wmi_sc->ab;
	u32 round_len = roundup(len, 4);

	skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	/* WMI payloads are accessed as 32-bit words; warn if misaligned */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath11k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}
  531. static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
  532. struct ieee80211_tx_info *info)
  533. {
  534. struct ath11k_base *ab = ar->ab;
  535. u32 freq = 0;
  536. if (ab->hw_params.support_off_channel_tx &&
  537. ar->scan.is_roc &&
  538. (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
  539. freq = ar->scan.roc_freq;
  540. return freq;
  541. }
/* Submit a management frame for transmission via WMI.
 *
 * The command carries the frame's DMA address (firmware fetches the
 * full frame from @frame's mapped buffer) plus an inline copy of up
 * to WMI_MGMT_SEND_DOWNLD_LEN bytes in a byte-array TLV. @buf_id is
 * the host-side descriptor id echoed back in the TX-complete event.
 *
 * Returns 0 on success; on failure the command skb is freed here but
 * @frame remains owned by the caller.
 */
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct wmi_mgmt_send_cmd *cmd;
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	/* Inline at most WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame */
	buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
		  frame->len : WMI_MGMT_SEND_DOWNLD_LEN;

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->desc_id = buf_id;
	cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
	cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
	cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
	cmd->frame_len = frame->len;
	cmd->buf_len = buf_len;
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
			    FIELD_PREP(WMI_TLV_LEN, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	/* Inline payload must match the CE byte order */
	ath11k_ce_byte_swap(frame_tlv->value, buf_len);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
/* Create a vdev in firmware.
 *
 * The command is a fixed vdev-create TLV followed by an array TLV of
 * WMI_NUM_SUPPORTED_BAND_MAX tx/rx stream configurations (2 GHz then
 * 5 GHz). On send failure the skb is freed here.
 */
int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
			   struct vdev_create_params *param)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_vdev_txrx_streams *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->if_id;
	cmd->vdev_type = param->type;
	cmd->vdev_subtype = param->subtype;
	cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
	cmd->pdev_id = param->pdev_id;
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	/* Array TLV header for the per-band stream configs */
	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);

	/* First entry: 2 GHz streams */
	txrx_streams->tlv_header =
		FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams =
				 param->chains[NL80211_BAND_2GHZ].tx;
	txrx_streams->supported_rx_streams =
				 param->chains[NL80211_BAND_2GHZ].rx;

	/* Second entry: 5 GHz streams */
	txrx_streams++;
	txrx_streams->tlv_header =
		FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams =
				 param->chains[NL80211_BAND_5GHZ].tx;
	txrx_streams->supported_rx_streams =
				 param->chains[NL80211_BAND_5GHZ].rx;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   param->if_id, param->type, param->subtype,
		   macaddr, param->pdev_id);

	return ret;
}
  647. int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
  648. {
  649. struct ath11k_pdev_wmi *wmi = ar->wmi;
  650. struct wmi_vdev_delete_cmd *cmd;
  651. struct sk_buff *skb;
  652. int ret;
  653. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  654. if (!skb)
  655. return -ENOMEM;
  656. cmd = (struct wmi_vdev_delete_cmd *)skb->data;
  657. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
  658. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  659. cmd->vdev_id = vdev_id;
  660. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
  661. if (ret) {
  662. ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
  663. dev_kfree_skb(skb);
  664. }
  665. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
  666. return ret;
  667. }
  668. int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
  669. {
  670. struct ath11k_pdev_wmi *wmi = ar->wmi;
  671. struct wmi_vdev_stop_cmd *cmd;
  672. struct sk_buff *skb;
  673. int ret;
  674. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  675. if (!skb)
  676. return -ENOMEM;
  677. cmd = (struct wmi_vdev_stop_cmd *)skb->data;
  678. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
  679. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  680. cmd->vdev_id = vdev_id;
  681. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
  682. if (ret) {
  683. ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
  684. dev_kfree_skb(skb);
  685. }
  686. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
  687. return ret;
  688. }
  689. int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
  690. {
  691. struct ath11k_pdev_wmi *wmi = ar->wmi;
  692. struct wmi_vdev_down_cmd *cmd;
  693. struct sk_buff *skb;
  694. int ret;
  695. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  696. if (!skb)
  697. return -ENOMEM;
  698. cmd = (struct wmi_vdev_down_cmd *)skb->data;
  699. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
  700. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  701. cmd->vdev_id = vdev_id;
  702. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
  703. if (ret) {
  704. ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
  705. dev_kfree_skb(skb);
  706. }
  707. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
  708. return ret;
  709. }
/* Translate the channel description from a vdev start request into the
 * firmware's wmi_channel layout: frequencies, mode, capability/restriction
 * flags and the regulatory power fields.
 */
static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	u32 center_freq1 = arg->channel.band_center_freq1;

	memset(chan, 0, sizeof(*chan));

	chan->mhz = arg->channel.freq;
	chan->band_center_freq1 = arg->channel.band_center_freq1;

	if (arg->channel.mode == MODE_11AX_HE160) {
		/* For HE160, freq1 is moved to the center of the primary
		 * 80 MHz segment (+/- 40 MHz depending on which side of
		 * the 160 MHz span the primary channel lies) and freq2
		 * carries the center of the whole 160 MHz block.
		 */
		if (arg->channel.freq > arg->channel.band_center_freq1)
			chan->band_center_freq1 = center_freq1 + 40;
		else
			chan->band_center_freq1 = center_freq1 - 40;

		chan->band_center_freq2 = arg->channel.band_center_freq1;
	} else if (arg->channel.mode == MODE_11AC_VHT80_80) {
		/* 80+80: second segment center goes in freq2 */
		chan->band_center_freq2 = arg->channel.band_center_freq2;
	} else {
		chan->band_center_freq2 = 0;
	}

	chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);

	/* Per-channel capability/restriction flags */
	if (arg->channel.passive)
		chan->info |= WMI_CHAN_INFO_PASSIVE;
	if (arg->channel.allow_ibss)
		chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
	if (arg->channel.allow_ht)
		chan->info |= WMI_CHAN_INFO_ALLOW_HT;
	if (arg->channel.allow_vht)
		chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
	if (arg->channel.allow_he)
		chan->info |= WMI_CHAN_INFO_ALLOW_HE;
	if (arg->channel.ht40plus)
		chan->info |= WMI_CHAN_INFO_HT40_PLUS;
	if (arg->channel.chan_radar)
		chan->info |= WMI_CHAN_INFO_DFS;
	if (arg->channel.freq2_radar)
		chan->info |= WMI_CHAN_INFO_DFS_FREQ2;

	/* Regulatory power limits; max_power is packed into both
	 * reg_info_1 (max pwr) and reg_info_2 (max tx pwr).
	 */
	chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
				      arg->channel.max_power) |
		FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
			   arg->channel.max_reg_power);

	chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
				      arg->channel.max_antenna_gain) |
		FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
			   arg->channel.max_power);
}
/* Build and send WMI_VDEV_START_REQUEST_CMDID (or the RESTART variant).
 *
 * Message layout: fixed command struct, followed by a wmi_channel TLV,
 * followed by an empty TLV array reserved for P2P NoA descriptors.
 *
 * SSID/hidden-SSID/PMF flags are only meaningful on a fresh start, not on
 * restart. Returns 0 on success or a negative error code; on send failure
 * the skb is freed here.
 */
int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	/* fixed cmd + channel TLV + empty NoA TLV array header */
	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_VDEV_START_REQUEST_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->beacon_interval = arg->bcn_intval;
	cmd->bcn_tx_rate = arg->bcn_tx_rate;
	cmd->dtim_period = arg->dtim_period;
	cmd->num_noa_descriptors = arg->num_noa_descriptors;
	cmd->preferred_rx_streams = arg->pref_rx_streams;
	cmd->preferred_tx_streams = arg->pref_tx_streams;
	cmd->cac_duration_ms = arg->cac_duration_ms;
	cmd->regdomain = arg->regdomain;
	cmd->he_ops = arg->he_ops;

	if (!restart) {
		/* These only apply when (re)creating the BSS, not on
		 * channel-switch restarts.
		 */
		if (arg->ssid) {
			cmd->ssid.ssid_len = arg->ssid_len;
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
		if (arg->pmf_enabled)
			cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
	}

	cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
		cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;

	/* channel TLV follows the fixed command struct */
	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath11k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
			   FIELD_PREP(WMI_TLV_LEN,
				      sizeof(*chan) - TLV_HDR_SIZE);
	ptr += sizeof(*chan);

	/* empty array TLV — no NoA descriptors are supplied */
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */
	ptr += sizeof(*tlv);

	if (restart)
		ret = ath11k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath11k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->channel.freq, arg->channel.mode);

	return ret;
}
/* Bring a vdev up (WMI_VDEV_UP_CMDID) with the given association id and
 * BSSID. For a station associated to a nontransmitted BSSID of an MBSSID
 * set, the transmitter BSSID and profile index/count are also passed so
 * firmware can track the correct beacon.
 *
 * Returns 0 on success or a negative error code; on send failure the skb
 * is freed here.
 */
int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;
	struct ieee80211_bss_conf *bss_conf;
	struct ath11k_vif *arvif;
	struct sk_buff *skb;
	int ret;

	/* may be NULL — checked before the MBSSID fields are filled below */
	arvif = ath11k_mac_get_arvif(ar, vdev_id);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->vdev_assoc_id = aid;

	ether_addr_copy(cmd->vdev_bssid.addr, bssid);

	if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
		bss_conf = &arvif->vif->bss_conf;

		if (bss_conf->nontransmitted) {
			/* MBSSID: report the transmitting BSSID plus this
			 * profile's index and the total indicator count.
			 */
			ether_addr_copy(cmd->trans_bssid.addr,
					bss_conf->transmitter_bssid);
			cmd->profile_idx = bss_conf->bssid_index;
			cmd->profile_num = bss_conf->bssid_indicator;
		}
	}

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   vdev_id, aid, bssid);

	return ret;
}
  864. int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
  865. struct peer_create_params *param)
  866. {
  867. struct ath11k_pdev_wmi *wmi = ar->wmi;
  868. struct wmi_peer_create_cmd *cmd;
  869. struct sk_buff *skb;
  870. int ret;
  871. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  872. if (!skb)
  873. return -ENOMEM;
  874. cmd = (struct wmi_peer_create_cmd *)skb->data;
  875. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
  876. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  877. ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
  878. cmd->peer_type = param->peer_type;
  879. cmd->vdev_id = param->vdev_id;
  880. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
  881. if (ret) {
  882. ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
  883. dev_kfree_skb(skb);
  884. }
  885. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  886. "WMI peer create vdev_id %d peer_addr %pM\n",
  887. param->vdev_id, param->peer_addr);
  888. return ret;
  889. }
  890. int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
  891. const u8 *peer_addr, u8 vdev_id)
  892. {
  893. struct ath11k_pdev_wmi *wmi = ar->wmi;
  894. struct wmi_peer_delete_cmd *cmd;
  895. struct sk_buff *skb;
  896. int ret;
  897. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  898. if (!skb)
  899. return -ENOMEM;
  900. cmd = (struct wmi_peer_delete_cmd *)skb->data;
  901. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
  902. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  903. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  904. cmd->vdev_id = vdev_id;
  905. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  906. "WMI peer delete vdev_id %d peer_addr %pM\n",
  907. vdev_id, peer_addr);
  908. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
  909. if (ret) {
  910. ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
  911. dev_kfree_skb(skb);
  912. }
  913. return ret;
  914. }
  915. int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
  916. struct pdev_set_regdomain_params *param)
  917. {
  918. struct ath11k_pdev_wmi *wmi = ar->wmi;
  919. struct wmi_pdev_set_regdomain_cmd *cmd;
  920. struct sk_buff *skb;
  921. int ret;
  922. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  923. if (!skb)
  924. return -ENOMEM;
  925. cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
  926. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  927. WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
  928. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  929. cmd->reg_domain = param->current_rd_in_use;
  930. cmd->reg_domain_2g = param->current_rd_2g;
  931. cmd->reg_domain_5g = param->current_rd_5g;
  932. cmd->conformance_test_limit_2g = param->ctl_2g;
  933. cmd->conformance_test_limit_5g = param->ctl_5g;
  934. cmd->dfs_domain = param->dfs_domain;
  935. cmd->pdev_id = param->pdev_id;
  936. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  937. "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
  938. param->current_rd_in_use, param->current_rd_2g,
  939. param->current_rd_5g, param->dfs_domain, param->pdev_id);
  940. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
  941. if (ret) {
  942. ath11k_warn(ar->ab,
  943. "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
  944. dev_kfree_skb(skb);
  945. }
  946. return ret;
  947. }
  948. int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
  949. u32 vdev_id, u32 param_id, u32 param_val)
  950. {
  951. struct ath11k_pdev_wmi *wmi = ar->wmi;
  952. struct wmi_peer_set_param_cmd *cmd;
  953. struct sk_buff *skb;
  954. int ret;
  955. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  956. if (!skb)
  957. return -ENOMEM;
  958. cmd = (struct wmi_peer_set_param_cmd *)skb->data;
  959. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
  960. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  961. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  962. cmd->vdev_id = vdev_id;
  963. cmd->param_id = param_id;
  964. cmd->param_value = param_val;
  965. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
  966. if (ret) {
  967. ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
  968. dev_kfree_skb(skb);
  969. }
  970. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  971. "WMI vdev %d peer 0x%pM set param %d value %d\n",
  972. vdev_id, peer_addr, param_id, param_val);
  973. return ret;
  974. }
  975. int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
  976. u8 peer_addr[ETH_ALEN],
  977. struct peer_flush_params *param)
  978. {
  979. struct ath11k_pdev_wmi *wmi = ar->wmi;
  980. struct wmi_peer_flush_tids_cmd *cmd;
  981. struct sk_buff *skb;
  982. int ret;
  983. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  984. if (!skb)
  985. return -ENOMEM;
  986. cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
  987. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
  988. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  989. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  990. cmd->peer_tid_bitmap = param->peer_tid_bitmap;
  991. cmd->vdev_id = param->vdev_id;
  992. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
  993. if (ret) {
  994. ath11k_warn(ar->ab,
  995. "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
  996. dev_kfree_skb(skb);
  997. }
  998. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  999. "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
  1000. param->vdev_id, peer_addr, param->peer_tid_bitmap);
  1001. return ret;
  1002. }
  1003. int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
  1004. int vdev_id, const u8 *addr,
  1005. dma_addr_t paddr, u8 tid,
  1006. u8 ba_window_size_valid,
  1007. u32 ba_window_size)
  1008. {
  1009. struct wmi_peer_reorder_queue_setup_cmd *cmd;
  1010. struct sk_buff *skb;
  1011. int ret;
  1012. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
  1013. if (!skb)
  1014. return -ENOMEM;
  1015. cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
  1016. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  1017. WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
  1018. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1019. ether_addr_copy(cmd->peer_macaddr.addr, addr);
  1020. cmd->vdev_id = vdev_id;
  1021. cmd->tid = tid;
  1022. cmd->queue_ptr_lo = lower_32_bits(paddr);
  1023. cmd->queue_ptr_hi = upper_32_bits(paddr);
  1024. cmd->queue_no = tid;
  1025. cmd->ba_window_size_valid = ba_window_size_valid;
  1026. cmd->ba_window_size = ba_window_size;
  1027. ret = ath11k_wmi_cmd_send(ar->wmi, skb,
  1028. WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
  1029. if (ret) {
  1030. ath11k_warn(ar->ab,
  1031. "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
  1032. dev_kfree_skb(skb);
  1033. }
  1034. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1035. "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
  1036. addr, vdev_id, tid);
  1037. return ret;
  1038. }
  1039. int
  1040. ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
  1041. struct rx_reorder_queue_remove_params *param)
  1042. {
  1043. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1044. struct wmi_peer_reorder_queue_remove_cmd *cmd;
  1045. struct sk_buff *skb;
  1046. int ret;
  1047. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1048. if (!skb)
  1049. return -ENOMEM;
  1050. cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
  1051. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  1052. WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
  1053. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1054. ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
  1055. cmd->vdev_id = param->vdev_id;
  1056. cmd->tid_mask = param->peer_tid_bitmap;
  1057. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1058. "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
  1059. param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);
  1060. ret = ath11k_wmi_cmd_send(wmi, skb,
  1061. WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
  1062. if (ret) {
  1063. ath11k_warn(ar->ab,
  1064. "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
  1065. dev_kfree_skb(skb);
  1066. }
  1067. return ret;
  1068. }
  1069. int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
  1070. u32 param_value, u8 pdev_id)
  1071. {
  1072. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1073. struct wmi_pdev_set_param_cmd *cmd;
  1074. struct sk_buff *skb;
  1075. int ret;
  1076. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1077. if (!skb)
  1078. return -ENOMEM;
  1079. cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
  1080. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
  1081. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1082. cmd->pdev_id = pdev_id;
  1083. cmd->param_id = param_id;
  1084. cmd->param_value = param_value;
  1085. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
  1086. if (ret) {
  1087. ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
  1088. dev_kfree_skb(skb);
  1089. }
  1090. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1091. "WMI pdev set param %d pdev id %d value %d\n",
  1092. param_id, pdev_id, param_value);
  1093. return ret;
  1094. }
  1095. int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
  1096. enum wmi_sta_ps_mode psmode)
  1097. {
  1098. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1099. struct wmi_pdev_set_ps_mode_cmd *cmd;
  1100. struct sk_buff *skb;
  1101. int ret;
  1102. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1103. if (!skb)
  1104. return -ENOMEM;
  1105. cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
  1106. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
  1107. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1108. cmd->vdev_id = vdev_id;
  1109. cmd->sta_ps_mode = psmode;
  1110. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
  1111. if (ret) {
  1112. ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
  1113. dev_kfree_skb(skb);
  1114. }
  1115. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1116. "WMI vdev set psmode %d vdev id %d\n",
  1117. psmode, vdev_id);
  1118. return ret;
  1119. }
  1120. int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
  1121. u32 pdev_id)
  1122. {
  1123. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1124. struct wmi_pdev_suspend_cmd *cmd;
  1125. struct sk_buff *skb;
  1126. int ret;
  1127. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1128. if (!skb)
  1129. return -ENOMEM;
  1130. cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
  1131. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
  1132. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1133. cmd->suspend_opt = suspend_opt;
  1134. cmd->pdev_id = pdev_id;
  1135. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
  1136. if (ret) {
  1137. ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
  1138. dev_kfree_skb(skb);
  1139. }
  1140. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1141. "WMI pdev suspend pdev_id %d\n", pdev_id);
  1142. return ret;
  1143. }
  1144. int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
  1145. {
  1146. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1147. struct wmi_pdev_resume_cmd *cmd;
  1148. struct sk_buff *skb;
  1149. int ret;
  1150. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1151. if (!skb)
  1152. return -ENOMEM;
  1153. cmd = (struct wmi_pdev_resume_cmd *)skb->data;
  1154. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
  1155. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1156. cmd->pdev_id = pdev_id;
  1157. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1158. "WMI pdev resume pdev id %d\n", pdev_id);
  1159. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
  1160. if (ret) {
  1161. ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
  1162. dev_kfree_skb(skb);
  1163. }
  1164. return ret;
  1165. }
  1166. /* TODO FW Support for the cmd is not available yet.
  1167. * Can be tested once the command and corresponding
  1168. * event is implemented in FW
  1169. */
  1170. int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
  1171. enum wmi_bss_chan_info_req_type type)
  1172. {
  1173. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1174. struct wmi_pdev_bss_chan_info_req_cmd *cmd;
  1175. struct sk_buff *skb;
  1176. int ret;
  1177. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1178. if (!skb)
  1179. return -ENOMEM;
  1180. cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
  1181. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  1182. WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
  1183. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1184. cmd->req_type = type;
  1185. cmd->pdev_id = ar->pdev->pdev_id;
  1186. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1187. "WMI bss chan info req type %d\n", type);
  1188. ret = ath11k_wmi_cmd_send(wmi, skb,
  1189. WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
  1190. if (ret) {
  1191. ath11k_warn(ar->ab,
  1192. "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
  1193. dev_kfree_skb(skb);
  1194. }
  1195. return ret;
  1196. }
  1197. int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
  1198. struct ap_ps_params *param)
  1199. {
  1200. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1201. struct wmi_ap_ps_peer_cmd *cmd;
  1202. struct sk_buff *skb;
  1203. int ret;
  1204. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1205. if (!skb)
  1206. return -ENOMEM;
  1207. cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
  1208. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
  1209. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1210. cmd->vdev_id = param->vdev_id;
  1211. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  1212. cmd->param = param->param;
  1213. cmd->value = param->value;
  1214. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
  1215. if (ret) {
  1216. ath11k_warn(ar->ab,
  1217. "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
  1218. dev_kfree_skb(skb);
  1219. }
  1220. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1221. "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
  1222. param->vdev_id, peer_addr, param->param, param->value);
  1223. return ret;
  1224. }
  1225. int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
  1226. u32 param, u32 param_value)
  1227. {
  1228. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1229. struct wmi_sta_powersave_param_cmd *cmd;
  1230. struct sk_buff *skb;
  1231. int ret;
  1232. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1233. if (!skb)
  1234. return -ENOMEM;
  1235. cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
  1236. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  1237. WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
  1238. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1239. cmd->vdev_id = vdev_id;
  1240. cmd->param = param;
  1241. cmd->value = param_value;
  1242. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1243. "WMI set sta ps vdev_id %d param %d value %d\n",
  1244. vdev_id, param, param_value);
  1245. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
  1246. if (ret) {
  1247. ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
  1248. dev_kfree_skb(skb);
  1249. }
  1250. return ret;
  1251. }
  1252. int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
  1253. {
  1254. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1255. struct wmi_force_fw_hang_cmd *cmd;
  1256. struct sk_buff *skb;
  1257. int ret, len;
  1258. len = sizeof(*cmd);
  1259. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  1260. if (!skb)
  1261. return -ENOMEM;
  1262. cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
  1263. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
  1264. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  1265. cmd->type = type;
  1266. cmd->delay_time_ms = delay_time_ms;
  1267. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
  1268. if (ret) {
  1269. ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
  1270. dev_kfree_skb(skb);
  1271. }
  1272. return ret;
  1273. }
  1274. int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
  1275. u32 param_id, u32 param_value)
  1276. {
  1277. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1278. struct wmi_vdev_set_param_cmd *cmd;
  1279. struct sk_buff *skb;
  1280. int ret;
  1281. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1282. if (!skb)
  1283. return -ENOMEM;
  1284. cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
  1285. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
  1286. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1287. cmd->vdev_id = vdev_id;
  1288. cmd->param_id = param_id;
  1289. cmd->param_value = param_value;
  1290. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
  1291. if (ret) {
  1292. ath11k_warn(ar->ab,
  1293. "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
  1294. dev_kfree_skb(skb);
  1295. }
  1296. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1297. "WMI vdev id 0x%x set param %d value %d\n",
  1298. vdev_id, param_id, param_value);
  1299. return ret;
  1300. }
  1301. int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
  1302. struct stats_request_params *param)
  1303. {
  1304. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1305. struct wmi_request_stats_cmd *cmd;
  1306. struct sk_buff *skb;
  1307. int ret;
  1308. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1309. if (!skb)
  1310. return -ENOMEM;
  1311. cmd = (struct wmi_request_stats_cmd *)skb->data;
  1312. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
  1313. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1314. cmd->stats_id = param->stats_id;
  1315. cmd->vdev_id = param->vdev_id;
  1316. cmd->pdev_id = param->pdev_id;
  1317. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
  1318. if (ret) {
  1319. ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
  1320. dev_kfree_skb(skb);
  1321. }
  1322. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1323. "WMI request stats 0x%x vdev id %d pdev id %d\n",
  1324. param->stats_id, param->vdev_id, param->pdev_id);
  1325. return ret;
  1326. }
  1327. int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar)
  1328. {
  1329. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1330. struct wmi_get_pdev_temperature_cmd *cmd;
  1331. struct sk_buff *skb;
  1332. int ret;
  1333. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1334. if (!skb)
  1335. return -ENOMEM;
  1336. cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
  1337. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) |
  1338. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1339. cmd->pdev_id = ar->pdev->pdev_id;
  1340. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
  1341. if (ret) {
  1342. ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
  1343. dev_kfree_skb(skb);
  1344. }
  1345. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1346. "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
  1347. return ret;
  1348. }
  1349. int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
  1350. u32 vdev_id, u32 bcn_ctrl_op)
  1351. {
  1352. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1353. struct wmi_bcn_offload_ctrl_cmd *cmd;
  1354. struct sk_buff *skb;
  1355. int ret;
  1356. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1357. if (!skb)
  1358. return -ENOMEM;
  1359. cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
  1360. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  1361. WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
  1362. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1363. cmd->vdev_id = vdev_id;
  1364. cmd->bcn_ctrl_op = bcn_ctrl_op;
  1365. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1366. "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
  1367. vdev_id, bcn_ctrl_op);
  1368. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
  1369. if (ret) {
  1370. ath11k_warn(ar->ab,
  1371. "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
  1372. dev_kfree_skb(skb);
  1373. }
  1374. return ret;
  1375. }
/* Upload a beacon template to firmware (WMI_BCN_TMPL_CMDID).
 *
 * Message layout: fixed command struct, a wmi_bcn_prb_info TLV (caps/erp
 * left zero), then a byte-array TLV carrying the beacon frame padded to a
 * 4-byte boundary. The TIM and (when CSA is active) countdown IE offsets
 * from @offs let firmware patch those fields in the running template.
 *
 * Returns 0 on success or a negative error code; on send failure the skb
 * is freed here.
 */
int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
			struct ieee80211_mutable_offsets *offs,
			struct sk_buff *bcn)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_bcn_tmpl_cmd *cmd;
	struct wmi_bcn_prb_info *bcn_prb_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(bcn->len, 4);
	struct ieee80211_vif *vif;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id);

	if (!arvif) {
		ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id);
		return -EINVAL;
	}

	vif = arvif->vif;

	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->tim_ie_offset = offs->tim_offset;

	if (vif->bss_conf.csa_active) {
		/* offsets of the (ext) channel switch countdown fields so
		 * firmware can decrement them per beacon
		 */
		cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
		cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
	}

	cmd->buf_len = bcn->len;

	/* probe/beacon info TLV follows the fixed command struct */
	ptr = skb->data + sizeof(*cmd);

	bcn_prb_info = ptr;
	len = sizeof(*bcn_prb_info);
	bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					      WMI_TAG_BCN_PRB_INFO) |
				   FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	bcn_prb_info->caps = 0;
	bcn_prb_info->erp = 0;

	ptr += sizeof(*bcn_prb_info);

	/* beacon frame bytes, padded to a 4-byte multiple */
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, aligned_len);
	memcpy(tlv->value, bcn->data, bcn->len);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
  1429. int ath11k_wmi_vdev_install_key(struct ath11k *ar,
  1430. struct wmi_vdev_install_key_arg *arg)
  1431. {
  1432. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1433. struct wmi_vdev_install_key_cmd *cmd;
  1434. struct wmi_tlv *tlv;
  1435. struct sk_buff *skb;
  1436. int ret, len;
  1437. int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
  1438. len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
  1439. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  1440. if (!skb)
  1441. return -ENOMEM;
  1442. cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
  1443. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
  1444. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1445. cmd->vdev_id = arg->vdev_id;
  1446. ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
  1447. cmd->key_idx = arg->key_idx;
  1448. cmd->key_flags = arg->key_flags;
  1449. cmd->key_cipher = arg->key_cipher;
  1450. cmd->key_len = arg->key_len;
  1451. cmd->key_txmic_len = arg->key_txmic_len;
  1452. cmd->key_rxmic_len = arg->key_rxmic_len;
  1453. if (arg->key_rsc_counter)
  1454. memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
  1455. sizeof(struct wmi_key_seq_counter));
  1456. tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
  1457. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
  1458. FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
  1459. if (arg->key_data)
  1460. memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
  1461. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
  1462. if (ret) {
  1463. ath11k_warn(ar->ab,
  1464. "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
  1465. dev_kfree_skb(skb);
  1466. }
  1467. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1468. "WMI vdev install key idx %d cipher %d len %d\n",
  1469. arg->key_idx, arg->key_cipher, arg->key_len);
  1470. return ret;
  1471. }
/* Derive cmd->peer_flags from the association parameters.
 *
 * The WME-derived capability flags are only set when WME information was
 * parsed from the (re)assoc request (param->is_wme_set). Later clauses
 * deliberately override earlier ones: the 4-way-handshake logic may clear
 * WMI_PEER_AUTH, safe mode clears the handshake flags, and an empty HT
 * rate set clears WMI_PEER_HT — so the order of these blocks matters.
 */
static inline void
ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
			   struct peer_assoc_params *param,
			   bool hw_crypto_disabled)
{
	cmd->peer_flags = 0;

	if (param->is_wme_set) {
		if (param->qos_flag)
			cmd->peer_flags |= WMI_PEER_QOS;
		if (param->apsd_flag)
			cmd->peer_flags |= WMI_PEER_APSD;
		if (param->ht_flag)
			cmd->peer_flags |= WMI_PEER_HT;
		if (param->bw_40)
			cmd->peer_flags |= WMI_PEER_40MHZ;
		if (param->bw_80)
			cmd->peer_flags |= WMI_PEER_80MHZ;
		if (param->bw_160)
			cmd->peer_flags |= WMI_PEER_160MHZ;

		/* Typically if STBC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (param->stbc_flag)
			cmd->peer_flags |= WMI_PEER_STBC;

		/* Typically if LDPC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (param->ldpc_flag)
			cmd->peer_flags |= WMI_PEER_LDPC;

		if (param->static_mimops_flag)
			cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
		if (param->dynamic_mimops_flag)
			cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
		if (param->spatial_mux_flag)
			cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
		if (param->vht_flag)
			cmd->peer_flags |= WMI_PEER_VHT;
		if (param->he_flag)
			cmd->peer_flags |= WMI_PEER_HE;
		if (param->twt_requester)
			cmd->peer_flags |= WMI_PEER_TWT_REQ;
		if (param->twt_responder)
			cmd->peer_flags |= WMI_PEER_TWT_RESP;
	}

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (param->auth_flag)
		cmd->peer_flags |= WMI_PEER_AUTH;
	if (param->need_ptk_4_way) {
		cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
		if (!hw_crypto_disabled && param->is_assoc)
			cmd->peer_flags &= ~WMI_PEER_AUTH;
	}
	if (param->need_gtk_2_way)
		cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
	/* safe mode bypass the 4-way handshake */
	if (param->safe_mode_enabled)
		cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
				     WMI_PEER_NEED_GTK_2_WAY);

	if (param->is_pmf_enabled)
		cmd->peer_flags |= WMI_PEER_PMF;

	/* Disable AMSDU for station transmit, if user configures it */
	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
	 * it
	 * if (param->amsdu_disable) Add after FW support
	 **/

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 * iwpriv
	 **/
	if (param->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= ~WMI_PEER_HT;
}
  1547. int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
  1548. struct peer_assoc_params *param)
  1549. {
  1550. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1551. struct wmi_peer_assoc_complete_cmd *cmd;
  1552. struct wmi_vht_rate_set *mcs;
  1553. struct wmi_he_rate_set *he_mcs;
  1554. struct sk_buff *skb;
  1555. struct wmi_tlv *tlv;
  1556. void *ptr;
  1557. u32 peer_legacy_rates_align;
  1558. u32 peer_ht_rates_align;
  1559. int i, ret, len;
  1560. peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
  1561. sizeof(u32));
  1562. peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
  1563. sizeof(u32));
  1564. len = sizeof(*cmd) +
  1565. TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
  1566. TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
  1567. sizeof(*mcs) + TLV_HDR_SIZE +
  1568. (sizeof(*he_mcs) * param->peer_he_mcs_count);
  1569. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  1570. if (!skb)
  1571. return -ENOMEM;
  1572. ptr = skb->data;
  1573. cmd = ptr;
  1574. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  1575. WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
  1576. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1577. cmd->vdev_id = param->vdev_id;
  1578. cmd->peer_new_assoc = param->peer_new_assoc;
  1579. cmd->peer_associd = param->peer_associd;
  1580. ath11k_wmi_copy_peer_flags(cmd, param,
  1581. test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED,
  1582. &ar->ab->dev_flags));
  1583. ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);
  1584. cmd->peer_rate_caps = param->peer_rate_caps;
  1585. cmd->peer_caps = param->peer_caps;
  1586. cmd->peer_listen_intval = param->peer_listen_intval;
  1587. cmd->peer_ht_caps = param->peer_ht_caps;
  1588. cmd->peer_max_mpdu = param->peer_max_mpdu;
  1589. cmd->peer_mpdu_density = param->peer_mpdu_density;
  1590. cmd->peer_vht_caps = param->peer_vht_caps;
  1591. cmd->peer_phymode = param->peer_phymode;
  1592. /* Update 11ax capabilities */
  1593. cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
  1594. cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
  1595. cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
  1596. cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
  1597. cmd->peer_he_ops = param->peer_he_ops;
  1598. memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
  1599. sizeof(param->peer_he_cap_phyinfo));
  1600. memcpy(&cmd->peer_ppet, &param->peer_ppet,
  1601. sizeof(param->peer_ppet));
  1602. /* Update peer legacy rate information */
  1603. ptr += sizeof(*cmd);
  1604. tlv = ptr;
  1605. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
  1606. FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
  1607. ptr += TLV_HDR_SIZE;
  1608. cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
  1609. memcpy(ptr, param->peer_legacy_rates.rates,
  1610. param->peer_legacy_rates.num_rates);
  1611. /* Update peer HT rate information */
  1612. ptr += peer_legacy_rates_align;
  1613. tlv = ptr;
  1614. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
  1615. FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
  1616. ptr += TLV_HDR_SIZE;
  1617. cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
  1618. memcpy(ptr, param->peer_ht_rates.rates,
  1619. param->peer_ht_rates.num_rates);
  1620. /* VHT Rates */
  1621. ptr += peer_ht_rates_align;
  1622. mcs = ptr;
  1623. mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
  1624. FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
  1625. cmd->peer_nss = param->peer_nss;
  1626. /* Update bandwidth-NSS mapping */
  1627. cmd->peer_bw_rxnss_override = 0;
  1628. cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
  1629. if (param->vht_capable) {
  1630. mcs->rx_max_rate = param->rx_max_rate;
  1631. mcs->rx_mcs_set = param->rx_mcs_set;
  1632. mcs->tx_max_rate = param->tx_max_rate;
  1633. mcs->tx_mcs_set = param->tx_mcs_set;
  1634. }
  1635. /* HE Rates */
  1636. cmd->peer_he_mcs = param->peer_he_mcs_count;
  1637. cmd->min_data_rate = param->min_data_rate;
  1638. ptr += sizeof(*mcs);
  1639. len = param->peer_he_mcs_count * sizeof(*he_mcs);
  1640. tlv = ptr;
  1641. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
  1642. FIELD_PREP(WMI_TLV_LEN, len);
  1643. ptr += TLV_HDR_SIZE;
  1644. /* Loop through the HE rate set */
  1645. for (i = 0; i < param->peer_he_mcs_count; i++) {
  1646. he_mcs = ptr;
  1647. he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  1648. WMI_TAG_HE_RATE_SET) |
  1649. FIELD_PREP(WMI_TLV_LEN,
  1650. sizeof(*he_mcs) - TLV_HDR_SIZE);
  1651. he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
  1652. he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
  1653. ptr += sizeof(*he_mcs);
  1654. }
  1655. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
  1656. if (ret) {
  1657. ath11k_warn(ar->ab,
  1658. "failed to send WMI_PEER_ASSOC_CMDID\n");
  1659. dev_kfree_skb(skb);
  1660. }
  1661. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  1662. "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
  1663. cmd->vdev_id, cmd->peer_associd, param->peer_mac,
  1664. cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
  1665. cmd->peer_listen_intval, cmd->peer_ht_caps,
  1666. cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
  1667. cmd->peer_mpdu_density,
  1668. cmd->peer_vht_caps, cmd->peer_he_cap_info,
  1669. cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
  1670. cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
  1671. cmd->peer_he_cap_phy[2],
  1672. cmd->peer_bw_rxnss_override);
  1673. return ret;
  1674. }
  1675. void ath11k_wmi_start_scan_init(struct ath11k *ar,
  1676. struct scan_req_params *arg)
  1677. {
  1678. /* setup commonly used values */
  1679. arg->scan_req_id = 1;
  1680. if (ar->state_11d == ATH11K_11D_PREPARING)
  1681. arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
  1682. else
  1683. arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
  1684. arg->dwell_time_active = 50;
  1685. arg->dwell_time_active_2g = 0;
  1686. arg->dwell_time_passive = 150;
  1687. arg->dwell_time_active_6g = 40;
  1688. arg->dwell_time_passive_6g = 30;
  1689. arg->min_rest_time = 50;
  1690. arg->max_rest_time = 500;
  1691. arg->repeat_probe_time = 0;
  1692. arg->probe_spacing_time = 0;
  1693. arg->idle_time = 0;
  1694. arg->max_scan_time = 20000;
  1695. arg->probe_delay = 5;
  1696. arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
  1697. WMI_SCAN_EVENT_COMPLETED |
  1698. WMI_SCAN_EVENT_BSS_CHANNEL |
  1699. WMI_SCAN_EVENT_FOREIGN_CHAN |
  1700. WMI_SCAN_EVENT_DEQUEUED;
  1701. arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
  1702. arg->num_bssid = 1;
  1703. /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
  1704. * ZEROs in probe request
  1705. */
  1706. eth_broadcast_addr(arg->bssid_list[0].addr);
  1707. }
  1708. static inline void
  1709. ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
  1710. struct scan_req_params *param)
  1711. {
  1712. /* Scan events subscription */
  1713. if (param->scan_ev_started)
  1714. cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED;
  1715. if (param->scan_ev_completed)
  1716. cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED;
  1717. if (param->scan_ev_bss_chan)
  1718. cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL;
  1719. if (param->scan_ev_foreign_chan)
  1720. cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN;
  1721. if (param->scan_ev_dequeued)
  1722. cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED;
  1723. if (param->scan_ev_preempted)
  1724. cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED;
  1725. if (param->scan_ev_start_failed)
  1726. cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED;
  1727. if (param->scan_ev_restarted)
  1728. cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED;
  1729. if (param->scan_ev_foreign_chn_exit)
  1730. cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
  1731. if (param->scan_ev_suspended)
  1732. cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED;
  1733. if (param->scan_ev_resumed)
  1734. cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED;
  1735. /** Set scan control flags */
  1736. cmd->scan_ctrl_flags = 0;
  1737. if (param->scan_f_passive)
  1738. cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
  1739. if (param->scan_f_strict_passive_pch)
  1740. cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
  1741. if (param->scan_f_promisc_mode)
  1742. cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS;
  1743. if (param->scan_f_capture_phy_err)
  1744. cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
  1745. if (param->scan_f_half_rate)
  1746. cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
  1747. if (param->scan_f_quarter_rate)
  1748. cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
  1749. if (param->scan_f_cck_rates)
  1750. cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
  1751. if (param->scan_f_ofdm_rates)
  1752. cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
  1753. if (param->scan_f_chan_stat_evnt)
  1754. cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
  1755. if (param->scan_f_filter_prb_req)
  1756. cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
  1757. if (param->scan_f_bcast_probe)
  1758. cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
  1759. if (param->scan_f_offchan_mgmt_tx)
  1760. cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
  1761. if (param->scan_f_offchan_data_tx)
  1762. cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
  1763. if (param->scan_f_force_active_dfs_chn)
  1764. cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
  1765. if (param->scan_f_add_tpc_ie_in_probe)
  1766. cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
  1767. if (param->scan_f_add_ds_ie_in_probe)
  1768. cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
  1769. if (param->scan_f_add_spoofed_mac_in_probe)
  1770. cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
  1771. if (param->scan_f_add_rand_seq_in_probe)
  1772. cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
  1773. if (param->scan_f_en_ie_whitelist_in_probe)
  1774. cmd->scan_ctrl_flags |=
  1775. WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
  1776. /* for adaptive scan mode using 3 bits (21 - 23 bits) */
  1777. WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
  1778. param->adaptive_dwell_time_mode);
  1779. }
  1780. int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
  1781. struct scan_req_params *params)
  1782. {
  1783. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1784. struct wmi_start_scan_cmd *cmd;
  1785. struct wmi_ssid *ssid = NULL;
  1786. struct wmi_mac_addr *bssid;
  1787. struct sk_buff *skb;
  1788. struct wmi_tlv *tlv;
  1789. void *ptr;
  1790. int i, ret, len;
  1791. u32 *tmp_ptr;
  1792. u16 extraie_len_with_pad = 0;
  1793. struct hint_short_ssid *s_ssid = NULL;
  1794. struct hint_bssid *hint_bssid = NULL;
  1795. len = sizeof(*cmd);
  1796. len += TLV_HDR_SIZE;
  1797. if (params->num_chan)
  1798. len += params->num_chan * sizeof(u32);
  1799. len += TLV_HDR_SIZE;
  1800. if (params->num_ssids)
  1801. len += params->num_ssids * sizeof(*ssid);
  1802. len += TLV_HDR_SIZE;
  1803. if (params->num_bssid)
  1804. len += sizeof(*bssid) * params->num_bssid;
  1805. len += TLV_HDR_SIZE;
  1806. if (params->extraie.len && params->extraie.len <= 0xFFFF)
  1807. extraie_len_with_pad =
  1808. roundup(params->extraie.len, sizeof(u32));
  1809. len += extraie_len_with_pad;
  1810. if (params->num_hint_bssid)
  1811. len += TLV_HDR_SIZE +
  1812. params->num_hint_bssid * sizeof(struct hint_bssid);
  1813. if (params->num_hint_s_ssid)
  1814. len += TLV_HDR_SIZE +
  1815. params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
  1816. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  1817. if (!skb)
  1818. return -ENOMEM;
  1819. ptr = skb->data;
  1820. cmd = ptr;
  1821. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
  1822. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1823. cmd->scan_id = params->scan_id;
  1824. cmd->scan_req_id = params->scan_req_id;
  1825. cmd->vdev_id = params->vdev_id;
  1826. cmd->scan_priority = params->scan_priority;
  1827. cmd->notify_scan_events = params->notify_scan_events;
  1828. ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);
  1829. cmd->dwell_time_active = params->dwell_time_active;
  1830. cmd->dwell_time_active_2g = params->dwell_time_active_2g;
  1831. cmd->dwell_time_passive = params->dwell_time_passive;
  1832. cmd->dwell_time_active_6g = params->dwell_time_active_6g;
  1833. cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
  1834. cmd->min_rest_time = params->min_rest_time;
  1835. cmd->max_rest_time = params->max_rest_time;
  1836. cmd->repeat_probe_time = params->repeat_probe_time;
  1837. cmd->probe_spacing_time = params->probe_spacing_time;
  1838. cmd->idle_time = params->idle_time;
  1839. cmd->max_scan_time = params->max_scan_time;
  1840. cmd->probe_delay = params->probe_delay;
  1841. cmd->burst_duration = params->burst_duration;
  1842. cmd->num_chan = params->num_chan;
  1843. cmd->num_bssid = params->num_bssid;
  1844. cmd->num_ssids = params->num_ssids;
  1845. cmd->ie_len = params->extraie.len;
  1846. cmd->n_probes = params->n_probes;
  1847. ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr);
  1848. ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr);
  1849. ptr += sizeof(*cmd);
  1850. len = params->num_chan * sizeof(u32);
  1851. tlv = ptr;
  1852. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
  1853. FIELD_PREP(WMI_TLV_LEN, len);
  1854. ptr += TLV_HDR_SIZE;
  1855. tmp_ptr = (u32 *)ptr;
  1856. for (i = 0; i < params->num_chan; ++i)
  1857. tmp_ptr[i] = params->chan_list[i];
  1858. ptr += len;
  1859. len = params->num_ssids * sizeof(*ssid);
  1860. tlv = ptr;
  1861. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
  1862. FIELD_PREP(WMI_TLV_LEN, len);
  1863. ptr += TLV_HDR_SIZE;
  1864. if (params->num_ssids) {
  1865. ssid = ptr;
  1866. for (i = 0; i < params->num_ssids; ++i) {
  1867. ssid->ssid_len = params->ssid[i].length;
  1868. memcpy(ssid->ssid, params->ssid[i].ssid,
  1869. params->ssid[i].length);
  1870. ssid++;
  1871. }
  1872. }
  1873. ptr += (params->num_ssids * sizeof(*ssid));
  1874. len = params->num_bssid * sizeof(*bssid);
  1875. tlv = ptr;
  1876. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
  1877. FIELD_PREP(WMI_TLV_LEN, len);
  1878. ptr += TLV_HDR_SIZE;
  1879. bssid = ptr;
  1880. if (params->num_bssid) {
  1881. for (i = 0; i < params->num_bssid; ++i) {
  1882. ether_addr_copy(bssid->addr,
  1883. params->bssid_list[i].addr);
  1884. bssid++;
  1885. }
  1886. }
  1887. ptr += params->num_bssid * sizeof(*bssid);
  1888. len = extraie_len_with_pad;
  1889. tlv = ptr;
  1890. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
  1891. FIELD_PREP(WMI_TLV_LEN, len);
  1892. ptr += TLV_HDR_SIZE;
  1893. if (extraie_len_with_pad)
  1894. memcpy(ptr, params->extraie.ptr,
  1895. params->extraie.len);
  1896. ptr += extraie_len_with_pad;
  1897. if (params->num_hint_s_ssid) {
  1898. len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
  1899. tlv = ptr;
  1900. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
  1901. FIELD_PREP(WMI_TLV_LEN, len);
  1902. ptr += TLV_HDR_SIZE;
  1903. s_ssid = ptr;
  1904. for (i = 0; i < params->num_hint_s_ssid; ++i) {
  1905. s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
  1906. s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
  1907. s_ssid++;
  1908. }
  1909. ptr += len;
  1910. }
  1911. if (params->num_hint_bssid) {
  1912. len = params->num_hint_bssid * sizeof(struct hint_bssid);
  1913. tlv = ptr;
  1914. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
  1915. FIELD_PREP(WMI_TLV_LEN, len);
  1916. ptr += TLV_HDR_SIZE;
  1917. hint_bssid = ptr;
  1918. for (i = 0; i < params->num_hint_bssid; ++i) {
  1919. hint_bssid->freq_flags =
  1920. params->hint_bssid[i].freq_flags;
  1921. ether_addr_copy(&params->hint_bssid[i].bssid.addr[0],
  1922. &hint_bssid->bssid.addr[0]);
  1923. hint_bssid++;
  1924. }
  1925. }
  1926. ret = ath11k_wmi_cmd_send(wmi, skb,
  1927. WMI_START_SCAN_CMDID);
  1928. if (ret) {
  1929. ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
  1930. dev_kfree_skb(skb);
  1931. }
  1932. return ret;
  1933. }
  1934. int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
  1935. struct scan_cancel_param *param)
  1936. {
  1937. struct ath11k_pdev_wmi *wmi = ar->wmi;
  1938. struct wmi_stop_scan_cmd *cmd;
  1939. struct sk_buff *skb;
  1940. int ret;
  1941. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  1942. if (!skb)
  1943. return -ENOMEM;
  1944. cmd = (struct wmi_stop_scan_cmd *)skb->data;
  1945. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
  1946. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  1947. cmd->vdev_id = param->vdev_id;
  1948. cmd->requestor = param->requester;
  1949. cmd->scan_id = param->scan_id;
  1950. cmd->pdev_id = param->pdev_id;
  1951. /* stop the scan with the corresponding scan_id */
  1952. if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
  1953. /* Cancelling all scans */
  1954. cmd->req_type = WMI_SCAN_STOP_ALL;
  1955. } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
  1956. /* Cancelling VAP scans */
  1957. cmd->req_type = WMI_SCN_STOP_VAP_ALL;
  1958. } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
  1959. /* Cancelling specific scan */
  1960. cmd->req_type = WMI_SCAN_STOP_ONE;
  1961. } else {
  1962. ath11k_warn(ar->ab, "invalid scan cancel param %d",
  1963. param->req_type);
  1964. dev_kfree_skb(skb);
  1965. return -EINVAL;
  1966. }
  1967. ret = ath11k_wmi_cmd_send(wmi, skb,
  1968. WMI_STOP_SCAN_CMDID);
  1969. if (ret) {
  1970. ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
  1971. dev_kfree_skb(skb);
  1972. }
  1973. return ret;
  1974. }
/* Push the host channel list to firmware via WMI_SCAN_CHAN_LIST_CMDID.
 *
 * The full list can exceed the maximum WMI message size, so it is sent
 * in chunks sized to max_msg_len; every chunk after the first carries
 * WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so firmware appends rather
 * than replaces.
 *
 * Note: chan_list->nallchans is consumed (decremented to zero) as the
 * chunks are built.
 *
 * Returns 0 on success or a negative error code; on send failure the
 * current chunk's skb is freed here.
 */
int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
				       struct scan_chan_list_params *chan_list)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel *chan_info;
	struct channel_param *tchan_info;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
	u32 *reg1, *reg2;

	tchan_info = chan_list->ch_param;
	while (chan_list->nallchans) {
		/* fixed command + array TLV header, before channel entries */
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		/* how many channel entries fit in one WMI message */
		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
			sizeof(*chan_info);

		if (chan_list->nallchans > max_chan_limit)
			num_send_chans = max_chan_limit;
		else
			num_send_chans = chan_list->nallchans;

		chan_list->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
		if (!skb)
			return -ENOMEM;

		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
		cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
			FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
		cmd->pdev_id = chan_list->pdev_id;
		cmd->num_scan_chans = num_send_chans;
		/* chunks after the first append to the firmware's list */
		if (num_sends)
			cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;

		ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
			   num_send_chans, len, cmd->pdev_id, num_sends);

		ptr = skb->data + sizeof(*cmd);

		/* len is reused here: payload size of the channel array TLV */
		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			/* len reused again: size of one channel TLV */
			len = sizeof(*chan_info);
			chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
							   WMI_TAG_CHANNEL) |
						FIELD_PREP(WMI_TLV_LEN,
							   len - TLV_HDR_SIZE);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = tchan_info->mhz;
			chan_info->band_center_freq1 = tchan_info->cfreq1;
			chan_info->band_center_freq2 = tchan_info->cfreq2;

			if (tchan_info->is_chan_passive)
				chan_info->info |= WMI_CHAN_INFO_PASSIVE;
			/* only the widest allowed mode flag is set:
			 * HE > VHT > HT
			 */
			if (tchan_info->allow_he)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
			else if (tchan_info->allow_vht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
			else if (tchan_info->allow_ht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
			if (tchan_info->half_rate)
				chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
			if (tchan_info->quarter_rate)
				chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
			if (tchan_info->psc_channel)
				chan_info->info |= WMI_CHAN_INFO_PSC;
			if (tchan_info->dfs_set)
				chan_info->info |= WMI_CHAN_INFO_DFS;

			chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
						      tchan_info->phy_mode);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
					    tchan_info->minpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
					    tchan_info->maxpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
					    tchan_info->maxregpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
					    tchan_info->reg_class_id);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
					    tchan_info->antennamax);
			/* NOTE(review): MAX_TX_PWR is filled from
			 * maxregpower, not a distinct tx-power field —
			 * confirm this is intended.
			 */
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
					    tchan_info->maxregpower);

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
				   i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			tchan_info++;
		}

		ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
			dev_kfree_skb(skb);
			return ret;
		}

		num_sends++;
	}

	return 0;
}
  2077. int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
  2078. struct wmi_wmm_params_all_arg *param)
  2079. {
  2080. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2081. struct wmi_vdev_set_wmm_params_cmd *cmd;
  2082. struct wmi_wmm_params *wmm_param;
  2083. struct wmi_wmm_params_arg *wmi_wmm_arg;
  2084. struct sk_buff *skb;
  2085. int ret, ac;
  2086. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2087. if (!skb)
  2088. return -ENOMEM;
  2089. cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
  2090. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  2091. WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
  2092. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2093. cmd->vdev_id = vdev_id;
  2094. cmd->wmm_param_type = 0;
  2095. for (ac = 0; ac < WME_NUM_AC; ac++) {
  2096. switch (ac) {
  2097. case WME_AC_BE:
  2098. wmi_wmm_arg = &param->ac_be;
  2099. break;
  2100. case WME_AC_BK:
  2101. wmi_wmm_arg = &param->ac_bk;
  2102. break;
  2103. case WME_AC_VI:
  2104. wmi_wmm_arg = &param->ac_vi;
  2105. break;
  2106. case WME_AC_VO:
  2107. wmi_wmm_arg = &param->ac_vo;
  2108. break;
  2109. }
  2110. wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
  2111. wmm_param->tlv_header =
  2112. FIELD_PREP(WMI_TLV_TAG,
  2113. WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
  2114. FIELD_PREP(WMI_TLV_LEN,
  2115. sizeof(*wmm_param) - TLV_HDR_SIZE);
  2116. wmm_param->aifs = wmi_wmm_arg->aifs;
  2117. wmm_param->cwmin = wmi_wmm_arg->cwmin;
  2118. wmm_param->cwmax = wmi_wmm_arg->cwmax;
  2119. wmm_param->txoplimit = wmi_wmm_arg->txop;
  2120. wmm_param->acm = wmi_wmm_arg->acm;
  2121. wmm_param->no_ack = wmi_wmm_arg->no_ack;
  2122. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2123. "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
  2124. ac, wmm_param->aifs, wmm_param->cwmin,
  2125. wmm_param->cwmax, wmm_param->txoplimit,
  2126. wmm_param->acm, wmm_param->no_ack);
  2127. }
  2128. ret = ath11k_wmi_cmd_send(wmi, skb,
  2129. WMI_VDEV_SET_WMM_PARAMS_CMDID);
  2130. if (ret) {
  2131. ath11k_warn(ar->ab,
  2132. "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
  2133. dev_kfree_skb(skb);
  2134. }
  2135. return ret;
  2136. }
  2137. int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
  2138. u32 pdev_id)
  2139. {
  2140. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2141. struct wmi_dfs_phyerr_offload_cmd *cmd;
  2142. struct sk_buff *skb;
  2143. int ret;
  2144. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2145. if (!skb)
  2146. return -ENOMEM;
  2147. cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
  2148. cmd->tlv_header =
  2149. FIELD_PREP(WMI_TLV_TAG,
  2150. WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
  2151. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2152. cmd->pdev_id = pdev_id;
  2153. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2154. "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
  2155. ret = ath11k_wmi_cmd_send(wmi, skb,
  2156. WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
  2157. if (ret) {
  2158. ath11k_warn(ar->ab,
  2159. "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
  2160. dev_kfree_skb(skb);
  2161. }
  2162. return ret;
  2163. }
  2164. int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
  2165. u32 tid, u32 initiator, u32 reason)
  2166. {
  2167. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2168. struct wmi_delba_send_cmd *cmd;
  2169. struct sk_buff *skb;
  2170. int ret;
  2171. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2172. if (!skb)
  2173. return -ENOMEM;
  2174. cmd = (struct wmi_delba_send_cmd *)skb->data;
  2175. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
  2176. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2177. cmd->vdev_id = vdev_id;
  2178. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  2179. cmd->tid = tid;
  2180. cmd->initiator = initiator;
  2181. cmd->reasoncode = reason;
  2182. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2183. "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
  2184. vdev_id, mac, tid, initiator, reason);
  2185. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
  2186. if (ret) {
  2187. ath11k_warn(ar->ab,
  2188. "failed to send WMI_DELBA_SEND_CMDID cmd\n");
  2189. dev_kfree_skb(skb);
  2190. }
  2191. return ret;
  2192. }
  2193. int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
  2194. u32 tid, u32 status)
  2195. {
  2196. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2197. struct wmi_addba_setresponse_cmd *cmd;
  2198. struct sk_buff *skb;
  2199. int ret;
  2200. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2201. if (!skb)
  2202. return -ENOMEM;
  2203. cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
  2204. cmd->tlv_header =
  2205. FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
  2206. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2207. cmd->vdev_id = vdev_id;
  2208. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  2209. cmd->tid = tid;
  2210. cmd->statuscode = status;
  2211. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2212. "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
  2213. vdev_id, mac, tid, status);
  2214. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
  2215. if (ret) {
  2216. ath11k_warn(ar->ab,
  2217. "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
  2218. dev_kfree_skb(skb);
  2219. }
  2220. return ret;
  2221. }
  2222. int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
  2223. u32 tid, u32 buf_size)
  2224. {
  2225. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2226. struct wmi_addba_send_cmd *cmd;
  2227. struct sk_buff *skb;
  2228. int ret;
  2229. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2230. if (!skb)
  2231. return -ENOMEM;
  2232. cmd = (struct wmi_addba_send_cmd *)skb->data;
  2233. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
  2234. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2235. cmd->vdev_id = vdev_id;
  2236. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  2237. cmd->tid = tid;
  2238. cmd->buffersize = buf_size;
  2239. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2240. "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
  2241. vdev_id, mac, tid, buf_size);
  2242. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
  2243. if (ret) {
  2244. ath11k_warn(ar->ab,
  2245. "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
  2246. dev_kfree_skb(skb);
  2247. }
  2248. return ret;
  2249. }
  2250. int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
  2251. {
  2252. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2253. struct wmi_addba_clear_resp_cmd *cmd;
  2254. struct sk_buff *skb;
  2255. int ret;
  2256. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2257. if (!skb)
  2258. return -ENOMEM;
  2259. cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
  2260. cmd->tlv_header =
  2261. FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
  2262. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2263. cmd->vdev_id = vdev_id;
  2264. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  2265. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2266. "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
  2267. vdev_id, mac);
  2268. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
  2269. if (ret) {
  2270. ath11k_warn(ar->ab,
  2271. "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
  2272. dev_kfree_skb(skb);
  2273. }
  2274. return ret;
  2275. }
  2276. int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
  2277. {
  2278. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2279. struct wmi_pdev_pktlog_filter_cmd *cmd;
  2280. struct wmi_pdev_pktlog_filter_info *info;
  2281. struct sk_buff *skb;
  2282. struct wmi_tlv *tlv;
  2283. void *ptr;
  2284. int ret, len;
  2285. len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
  2286. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2287. if (!skb)
  2288. return -ENOMEM;
  2289. cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
  2290. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
  2291. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2292. cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
  2293. cmd->num_mac = 1;
  2294. cmd->enable = enable;
  2295. ptr = skb->data + sizeof(*cmd);
  2296. tlv = ptr;
  2297. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
  2298. FIELD_PREP(WMI_TLV_LEN, sizeof(*info));
  2299. ptr += TLV_HDR_SIZE;
  2300. info = ptr;
  2301. ether_addr_copy(info->peer_macaddr.addr, addr);
  2302. info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
  2303. FIELD_PREP(WMI_TLV_LEN,
  2304. sizeof(*info) - TLV_HDR_SIZE);
  2305. ret = ath11k_wmi_cmd_send(wmi, skb,
  2306. WMI_PDEV_PKTLOG_FILTER_CMDID);
  2307. if (ret) {
  2308. ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
  2309. dev_kfree_skb(skb);
  2310. }
  2311. return ret;
  2312. }
  2313. int
  2314. ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
  2315. struct wmi_init_country_params init_cc_params)
  2316. {
  2317. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2318. struct wmi_init_country_cmd *cmd;
  2319. struct sk_buff *skb;
  2320. int ret;
  2321. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2322. if (!skb)
  2323. return -ENOMEM;
  2324. cmd = (struct wmi_init_country_cmd *)skb->data;
  2325. cmd->tlv_header =
  2326. FIELD_PREP(WMI_TLV_TAG,
  2327. WMI_TAG_SET_INIT_COUNTRY_CMD) |
  2328. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2329. cmd->pdev_id = ar->pdev->pdev_id;
  2330. switch (init_cc_params.flags) {
  2331. case ALPHA_IS_SET:
  2332. cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
  2333. memcpy((u8 *)&cmd->cc_info.alpha2,
  2334. init_cc_params.cc_info.alpha2, 3);
  2335. break;
  2336. case CC_IS_SET:
  2337. cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
  2338. cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
  2339. break;
  2340. case REGDMN_IS_SET:
  2341. cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
  2342. cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
  2343. break;
  2344. default:
  2345. ret = -EINVAL;
  2346. goto out;
  2347. }
  2348. ret = ath11k_wmi_cmd_send(wmi, skb,
  2349. WMI_SET_INIT_COUNTRY_CMDID);
  2350. out:
  2351. if (ret) {
  2352. ath11k_warn(ar->ab,
  2353. "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
  2354. ret);
  2355. dev_kfree_skb(skb);
  2356. }
  2357. return ret;
  2358. }
  2359. int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
  2360. struct wmi_set_current_country_params *param)
  2361. {
  2362. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2363. struct wmi_set_current_country_cmd *cmd;
  2364. struct sk_buff *skb;
  2365. int ret;
  2366. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2367. if (!skb)
  2368. return -ENOMEM;
  2369. cmd = (struct wmi_set_current_country_cmd *)skb->data;
  2370. cmd->tlv_header =
  2371. FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) |
  2372. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2373. cmd->pdev_id = ar->pdev->pdev_id;
  2374. memcpy(&cmd->new_alpha2, &param->alpha2, 3);
  2375. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
  2376. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2377. "set current country pdev id %d alpha2 %c%c\n",
  2378. ar->pdev->pdev_id,
  2379. param->alpha2[0],
  2380. param->alpha2[1]);
  2381. if (ret) {
  2382. ath11k_warn(ar->ab,
  2383. "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
  2384. dev_kfree_skb(skb);
  2385. }
  2386. return ret;
  2387. }
  2388. int
  2389. ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
  2390. struct thermal_mitigation_params *param)
  2391. {
  2392. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2393. struct wmi_therm_throt_config_request_cmd *cmd;
  2394. struct wmi_therm_throt_level_config_info *lvl_conf;
  2395. struct wmi_tlv *tlv;
  2396. struct sk_buff *skb;
  2397. int i, ret, len;
  2398. len = sizeof(*cmd) + TLV_HDR_SIZE +
  2399. THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info);
  2400. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2401. if (!skb)
  2402. return -ENOMEM;
  2403. cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;
  2404. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) |
  2405. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2406. cmd->pdev_id = ar->pdev->pdev_id;
  2407. cmd->enable = param->enable;
  2408. cmd->dc = param->dc;
  2409. cmd->dc_per_event = param->dc_per_event;
  2410. cmd->therm_throt_levels = THERMAL_LEVELS;
  2411. tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
  2412. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
  2413. FIELD_PREP(WMI_TLV_LEN,
  2414. (THERMAL_LEVELS *
  2415. sizeof(struct wmi_therm_throt_level_config_info)));
  2416. lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
  2417. sizeof(*cmd) +
  2418. TLV_HDR_SIZE);
  2419. for (i = 0; i < THERMAL_LEVELS; i++) {
  2420. lvl_conf->tlv_header =
  2421. FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) |
  2422. FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE);
  2423. lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
  2424. lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
  2425. lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
  2426. lvl_conf->prio = param->levelconf[i].priority;
  2427. lvl_conf++;
  2428. }
  2429. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
  2430. if (ret) {
  2431. ath11k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
  2432. dev_kfree_skb(skb);
  2433. }
  2434. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2435. "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
  2436. ar->pdev->pdev_id, param->enable, param->dc,
  2437. param->dc_per_event, THERMAL_LEVELS);
  2438. return ret;
  2439. }
  2440. int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
  2441. struct wmi_11d_scan_start_params *param)
  2442. {
  2443. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2444. struct wmi_11d_scan_start_cmd *cmd;
  2445. struct sk_buff *skb;
  2446. int ret;
  2447. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2448. if (!skb)
  2449. return -ENOMEM;
  2450. cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
  2451. cmd->tlv_header =
  2452. FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
  2453. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2454. cmd->vdev_id = param->vdev_id;
  2455. cmd->scan_period_msec = param->scan_period_msec;
  2456. cmd->start_interval_msec = param->start_interval_msec;
  2457. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
  2458. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2459. "send 11d scan start vdev id %d period %d ms internal %d ms\n",
  2460. cmd->vdev_id,
  2461. cmd->scan_period_msec,
  2462. cmd->start_interval_msec);
  2463. if (ret) {
  2464. ath11k_warn(ar->ab,
  2465. "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
  2466. dev_kfree_skb(skb);
  2467. }
  2468. return ret;
  2469. }
  2470. int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id)
  2471. {
  2472. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2473. struct wmi_11d_scan_stop_cmd *cmd;
  2474. struct sk_buff *skb;
  2475. int ret;
  2476. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2477. if (!skb)
  2478. return -ENOMEM;
  2479. cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
  2480. cmd->tlv_header =
  2481. FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) |
  2482. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2483. cmd->vdev_id = vdev_id;
  2484. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
  2485. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2486. "send 11d scan stop vdev id %d\n",
  2487. cmd->vdev_id);
  2488. if (ret) {
  2489. ath11k_warn(ar->ab,
  2490. "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
  2491. dev_kfree_skb(skb);
  2492. }
  2493. return ret;
  2494. }
  2495. int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
  2496. {
  2497. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2498. struct wmi_pktlog_enable_cmd *cmd;
  2499. struct sk_buff *skb;
  2500. int ret;
  2501. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2502. if (!skb)
  2503. return -ENOMEM;
  2504. cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
  2505. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
  2506. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2507. cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
  2508. cmd->evlist = pktlog_filter;
  2509. cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;
  2510. ret = ath11k_wmi_cmd_send(wmi, skb,
  2511. WMI_PDEV_PKTLOG_ENABLE_CMDID);
  2512. if (ret) {
  2513. ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
  2514. dev_kfree_skb(skb);
  2515. }
  2516. return ret;
  2517. }
  2518. int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
  2519. {
  2520. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2521. struct wmi_pktlog_disable_cmd *cmd;
  2522. struct sk_buff *skb;
  2523. int ret;
  2524. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
  2525. if (!skb)
  2526. return -ENOMEM;
  2527. cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
  2528. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
  2529. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  2530. cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
  2531. ret = ath11k_wmi_cmd_send(wmi, skb,
  2532. WMI_PDEV_PKTLOG_DISABLE_CMDID);
  2533. if (ret) {
  2534. ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
  2535. dev_kfree_skb(skb);
  2536. }
  2537. return ret;
  2538. }
  2539. void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params)
  2540. {
  2541. twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
  2542. twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
  2543. twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
  2544. twt_params->congestion_thresh_teardown =
  2545. ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
  2546. twt_params->congestion_thresh_critical =
  2547. ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
  2548. twt_params->interference_thresh_teardown =
  2549. ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
  2550. twt_params->interference_thresh_setup =
  2551. ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
  2552. twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
  2553. twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
  2554. twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
  2555. twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
  2556. twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
  2557. twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
  2558. twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
  2559. twt_params->remove_sta_slot_interval =
  2560. ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
  2561. /* TODO add MBSSID support */
  2562. twt_params->mbss_support = 0;
  2563. }
  2564. int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
  2565. struct wmi_twt_enable_params *params)
  2566. {
  2567. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2568. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2569. struct wmi_twt_enable_params_cmd *cmd;
  2570. struct sk_buff *skb;
  2571. int ret, len;
  2572. len = sizeof(*cmd);
  2573. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2574. if (!skb)
  2575. return -ENOMEM;
  2576. cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
  2577. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
  2578. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2579. cmd->pdev_id = pdev_id;
  2580. cmd->sta_cong_timer_ms = params->sta_cong_timer_ms;
  2581. cmd->default_slot_size = params->default_slot_size;
  2582. cmd->congestion_thresh_setup = params->congestion_thresh_setup;
  2583. cmd->congestion_thresh_teardown = params->congestion_thresh_teardown;
  2584. cmd->congestion_thresh_critical = params->congestion_thresh_critical;
  2585. cmd->interference_thresh_teardown = params->interference_thresh_teardown;
  2586. cmd->interference_thresh_setup = params->interference_thresh_setup;
  2587. cmd->min_no_sta_setup = params->min_no_sta_setup;
  2588. cmd->min_no_sta_teardown = params->min_no_sta_teardown;
  2589. cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots;
  2590. cmd->min_no_twt_slots = params->min_no_twt_slots;
  2591. cmd->max_no_sta_twt = params->max_no_sta_twt;
  2592. cmd->mode_check_interval = params->mode_check_interval;
  2593. cmd->add_sta_slot_interval = params->add_sta_slot_interval;
  2594. cmd->remove_sta_slot_interval = params->remove_sta_slot_interval;
  2595. cmd->mbss_support = params->mbss_support;
  2596. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
  2597. if (ret) {
  2598. ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
  2599. dev_kfree_skb(skb);
  2600. } else {
  2601. ar->twt_enabled = 1;
  2602. }
  2603. return ret;
  2604. }
  2605. int
  2606. ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
  2607. {
  2608. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2609. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2610. struct wmi_twt_disable_params_cmd *cmd;
  2611. struct sk_buff *skb;
  2612. int ret, len;
  2613. len = sizeof(*cmd);
  2614. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2615. if (!skb)
  2616. return -ENOMEM;
  2617. cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
  2618. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
  2619. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2620. cmd->pdev_id = pdev_id;
  2621. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
  2622. if (ret) {
  2623. ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
  2624. dev_kfree_skb(skb);
  2625. } else {
  2626. ar->twt_enabled = 0;
  2627. }
  2628. return ret;
  2629. }
  2630. int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
  2631. struct wmi_twt_add_dialog_params *params)
  2632. {
  2633. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2634. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2635. struct wmi_twt_add_dialog_params_cmd *cmd;
  2636. struct sk_buff *skb;
  2637. int ret, len;
  2638. len = sizeof(*cmd);
  2639. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2640. if (!skb)
  2641. return -ENOMEM;
  2642. cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
  2643. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
  2644. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2645. cmd->vdev_id = params->vdev_id;
  2646. ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
  2647. cmd->dialog_id = params->dialog_id;
  2648. cmd->wake_intvl_us = params->wake_intvl_us;
  2649. cmd->wake_intvl_mantis = params->wake_intvl_mantis;
  2650. cmd->wake_dura_us = params->wake_dura_us;
  2651. cmd->sp_offset_us = params->sp_offset_us;
  2652. cmd->flags = params->twt_cmd;
  2653. if (params->flag_bcast)
  2654. cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
  2655. if (params->flag_trigger)
  2656. cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
  2657. if (params->flag_flow_type)
  2658. cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
  2659. if (params->flag_protection)
  2660. cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
  2661. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2662. "wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
  2663. cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
  2664. cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
  2665. cmd->flags);
  2666. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
  2667. if (ret) {
  2668. ath11k_warn(ab,
  2669. "failed to send wmi command to add twt dialog: %d",
  2670. ret);
  2671. dev_kfree_skb(skb);
  2672. }
  2673. return ret;
  2674. }
  2675. int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
  2676. struct wmi_twt_del_dialog_params *params)
  2677. {
  2678. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2679. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2680. struct wmi_twt_del_dialog_params_cmd *cmd;
  2681. struct sk_buff *skb;
  2682. int ret, len;
  2683. len = sizeof(*cmd);
  2684. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2685. if (!skb)
  2686. return -ENOMEM;
  2687. cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
  2688. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
  2689. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2690. cmd->vdev_id = params->vdev_id;
  2691. ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
  2692. cmd->dialog_id = params->dialog_id;
  2693. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2694. "wmi delete twt dialog vdev %u dialog id %u\n",
  2695. cmd->vdev_id, cmd->dialog_id);
  2696. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
  2697. if (ret) {
  2698. ath11k_warn(ab,
  2699. "failed to send wmi command to delete twt dialog: %d",
  2700. ret);
  2701. dev_kfree_skb(skb);
  2702. }
  2703. return ret;
  2704. }
  2705. int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
  2706. struct wmi_twt_pause_dialog_params *params)
  2707. {
  2708. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2709. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2710. struct wmi_twt_pause_dialog_params_cmd *cmd;
  2711. struct sk_buff *skb;
  2712. int ret, len;
  2713. len = sizeof(*cmd);
  2714. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2715. if (!skb)
  2716. return -ENOMEM;
  2717. cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
  2718. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  2719. WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
  2720. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2721. cmd->vdev_id = params->vdev_id;
  2722. ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
  2723. cmd->dialog_id = params->dialog_id;
  2724. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2725. "wmi pause twt dialog vdev %u dialog id %u\n",
  2726. cmd->vdev_id, cmd->dialog_id);
  2727. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
  2728. if (ret) {
  2729. ath11k_warn(ab,
  2730. "failed to send wmi command to pause twt dialog: %d",
  2731. ret);
  2732. dev_kfree_skb(skb);
  2733. }
  2734. return ret;
  2735. }
  2736. int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
  2737. struct wmi_twt_resume_dialog_params *params)
  2738. {
  2739. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2740. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2741. struct wmi_twt_resume_dialog_params_cmd *cmd;
  2742. struct sk_buff *skb;
  2743. int ret, len;
  2744. len = sizeof(*cmd);
  2745. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2746. if (!skb)
  2747. return -ENOMEM;
  2748. cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
  2749. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  2750. WMI_TAG_TWT_RESUME_DIALOG_CMD) |
  2751. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2752. cmd->vdev_id = params->vdev_id;
  2753. ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
  2754. cmd->dialog_id = params->dialog_id;
  2755. cmd->sp_offset_us = params->sp_offset_us;
  2756. cmd->next_twt_size = params->next_twt_size;
  2757. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2758. "wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
  2759. cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
  2760. cmd->next_twt_size);
  2761. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
  2762. if (ret) {
  2763. ath11k_warn(ab,
  2764. "failed to send wmi command to resume twt dialog: %d",
  2765. ret);
  2766. dev_kfree_skb(skb);
  2767. }
  2768. return ret;
  2769. }
  2770. int
  2771. ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
  2772. struct ieee80211_he_obss_pd *he_obss_pd)
  2773. {
  2774. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2775. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2776. struct wmi_obss_spatial_reuse_params_cmd *cmd;
  2777. struct sk_buff *skb;
  2778. int ret, len;
  2779. len = sizeof(*cmd);
  2780. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2781. if (!skb)
  2782. return -ENOMEM;
  2783. cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
  2784. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  2785. WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
  2786. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2787. cmd->vdev_id = vdev_id;
  2788. cmd->enable = he_obss_pd->enable;
  2789. cmd->obss_min = he_obss_pd->min_offset;
  2790. cmd->obss_max = he_obss_pd->max_offset;
  2791. ret = ath11k_wmi_cmd_send(wmi, skb,
  2792. WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
  2793. if (ret) {
  2794. ath11k_warn(ab,
  2795. "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
  2796. dev_kfree_skb(skb);
  2797. }
  2798. return ret;
  2799. }
  2800. int
  2801. ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap)
  2802. {
  2803. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2804. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2805. struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
  2806. struct sk_buff *skb;
  2807. int ret, len;
  2808. len = sizeof(*cmd);
  2809. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2810. if (!skb)
  2811. return -ENOMEM;
  2812. cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
  2813. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  2814. WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) |
  2815. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2816. cmd->pdev_id = ar->pdev->pdev_id;
  2817. memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
  2818. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2819. "obss pd pdev_id %d bss color bitmap %08x %08x\n",
  2820. cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
  2821. ret = ath11k_wmi_cmd_send(wmi, skb,
  2822. WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID);
  2823. if (ret) {
  2824. ath11k_warn(ab,
  2825. "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID");
  2826. dev_kfree_skb(skb);
  2827. }
  2828. return ret;
  2829. }
  2830. int
  2831. ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap)
  2832. {
  2833. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2834. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2835. struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
  2836. struct sk_buff *skb;
  2837. int ret, len;
  2838. len = sizeof(*cmd);
  2839. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2840. if (!skb)
  2841. return -ENOMEM;
  2842. cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
  2843. cmd->tlv_header =
  2844. FIELD_PREP(WMI_TLV_TAG,
  2845. WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) |
  2846. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2847. cmd->pdev_id = ar->pdev->pdev_id;
  2848. memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
  2849. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2850. "obss pd pdev_id %d partial bssid bitmap %08x %08x\n",
  2851. cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
  2852. ret = ath11k_wmi_cmd_send(wmi, skb,
  2853. WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID);
  2854. if (ret) {
  2855. ath11k_warn(ab,
  2856. "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID");
  2857. dev_kfree_skb(skb);
  2858. }
  2859. return ret;
  2860. }
  2861. int
  2862. ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
  2863. {
  2864. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2865. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2866. struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
  2867. struct sk_buff *skb;
  2868. int ret, len;
  2869. len = sizeof(*cmd);
  2870. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2871. if (!skb)
  2872. return -ENOMEM;
  2873. cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
  2874. cmd->tlv_header =
  2875. FIELD_PREP(WMI_TLV_TAG,
  2876. WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
  2877. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2878. cmd->pdev_id = ar->pdev->pdev_id;
  2879. memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
  2880. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2881. "obss pd srg pdev_id %d bss color enable bitmap %08x %08x\n",
  2882. cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
  2883. ret = ath11k_wmi_cmd_send(wmi, skb,
  2884. WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
  2885. if (ret) {
  2886. ath11k_warn(ab,
  2887. "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
  2888. dev_kfree_skb(skb);
  2889. }
  2890. return ret;
  2891. }
  2892. int
  2893. ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
  2894. {
  2895. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2896. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2897. struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
  2898. struct sk_buff *skb;
  2899. int ret, len;
  2900. len = sizeof(*cmd);
  2901. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2902. if (!skb)
  2903. return -ENOMEM;
  2904. cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
  2905. cmd->tlv_header =
  2906. FIELD_PREP(WMI_TLV_TAG,
  2907. WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
  2908. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2909. cmd->pdev_id = ar->pdev->pdev_id;
  2910. memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
  2911. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2912. "obss pd srg pdev_id %d bssid enable bitmap %08x %08x\n",
  2913. cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
  2914. ret = ath11k_wmi_cmd_send(wmi, skb,
  2915. WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
  2916. if (ret) {
  2917. ath11k_warn(ab,
  2918. "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
  2919. dev_kfree_skb(skb);
  2920. }
  2921. return ret;
  2922. }
  2923. int
  2924. ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
  2925. {
  2926. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2927. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2928. struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
  2929. struct sk_buff *skb;
  2930. int ret, len;
  2931. len = sizeof(*cmd);
  2932. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2933. if (!skb)
  2934. return -ENOMEM;
  2935. cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
  2936. cmd->tlv_header =
  2937. FIELD_PREP(WMI_TLV_TAG,
  2938. WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
  2939. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2940. cmd->pdev_id = ar->pdev->pdev_id;
  2941. memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
  2942. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2943. "obss pd non_srg pdev_id %d bss color enable bitmap %08x %08x\n",
  2944. cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
  2945. ret = ath11k_wmi_cmd_send(wmi, skb,
  2946. WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
  2947. if (ret) {
  2948. ath11k_warn(ab,
  2949. "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
  2950. dev_kfree_skb(skb);
  2951. }
  2952. return ret;
  2953. }
  2954. int
  2955. ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
  2956. {
  2957. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2958. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2959. struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
  2960. struct sk_buff *skb;
  2961. int ret, len;
  2962. len = sizeof(*cmd);
  2963. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2964. if (!skb)
  2965. return -ENOMEM;
  2966. cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
  2967. cmd->tlv_header =
  2968. FIELD_PREP(WMI_TLV_TAG,
  2969. WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
  2970. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  2971. cmd->pdev_id = ar->pdev->pdev_id;
  2972. memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
  2973. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  2974. "obss pd non_srg pdev_id %d bssid enable bitmap %08x %08x\n",
  2975. cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
  2976. ret = ath11k_wmi_cmd_send(wmi, skb,
  2977. WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
  2978. if (ret) {
  2979. ath11k_warn(ab,
  2980. "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
  2981. dev_kfree_skb(skb);
  2982. }
  2983. return ret;
  2984. }
  2985. int
  2986. ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
  2987. u8 bss_color, u32 period,
  2988. bool enable)
  2989. {
  2990. struct ath11k_pdev_wmi *wmi = ar->wmi;
  2991. struct ath11k_base *ab = wmi->wmi_ab->ab;
  2992. struct wmi_obss_color_collision_cfg_params_cmd *cmd;
  2993. struct sk_buff *skb;
  2994. int ret, len;
  2995. len = sizeof(*cmd);
  2996. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  2997. if (!skb)
  2998. return -ENOMEM;
  2999. cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
  3000. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  3001. WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) |
  3002. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  3003. cmd->vdev_id = vdev_id;
  3004. cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION :
  3005. ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE;
  3006. cmd->current_bss_color = bss_color;
  3007. cmd->detection_period_ms = period;
  3008. cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS;
  3009. cmd->free_slot_expiry_time_ms = 0;
  3010. cmd->flags = 0;
  3011. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  3012. "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
  3013. cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
  3014. cmd->detection_period_ms, cmd->scan_period_ms);
  3015. ret = ath11k_wmi_cmd_send(wmi, skb,
  3016. WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
  3017. if (ret) {
  3018. ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
  3019. dev_kfree_skb(skb);
  3020. }
  3021. return ret;
  3022. }
  3023. int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
  3024. bool enable)
  3025. {
  3026. struct ath11k_pdev_wmi *wmi = ar->wmi;
  3027. struct ath11k_base *ab = wmi->wmi_ab->ab;
  3028. struct wmi_bss_color_change_enable_params_cmd *cmd;
  3029. struct sk_buff *skb;
  3030. int ret, len;
  3031. len = sizeof(*cmd);
  3032. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  3033. if (!skb)
  3034. return -ENOMEM;
  3035. cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
  3036. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) |
  3037. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  3038. cmd->vdev_id = vdev_id;
  3039. cmd->enable = enable ? 1 : 0;
  3040. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  3041. "wmi_send_bss_color_change_enable id %d enable %d\n",
  3042. cmd->vdev_id, cmd->enable);
  3043. ret = ath11k_wmi_cmd_send(wmi, skb,
  3044. WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
  3045. if (ret) {
  3046. ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
  3047. dev_kfree_skb(skb);
  3048. }
  3049. return ret;
  3050. }
/* Upload the FILS discovery frame template for @vdev_id to firmware.
 *
 * Message layout: fixed wmi_fils_discovery_tmpl_cmd followed by one
 * ARRAY_BYTE TLV carrying the template; the TLV length is rounded up
 * to a 4-byte boundary while cmd->buf_len keeps the real length.
 *
 * Returns 0 on success or a negative error code; the skb is freed here
 * only when the send fails.
 */
int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len;
	struct wmi_fils_discovery_tmpl_cmd *cmd;

	aligned_len = roundup(tmpl->len, 4);
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_FILS_DISCOVERY_TMPL_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->buf_len = tmpl->len;	/* unpadded template length */
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, aligned_len);
	/* only tmpl->len bytes are copied; any padding stays as allocated */
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}
/* Upload the probe response frame template for @vdev_id to firmware.
 *
 * Message layout: wmi_probe_tmpl_cmd, then a wmi_bcn_prb_info TLV
 * (caps and erp are set to zero here), then an ARRAY_BYTE TLV with the
 * template padded to a 4-byte boundary.
 *
 * Returns 0 on success or a negative error code; the skb is freed here
 * only when the send fails.
 */
int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct wmi_bcn_prb_info *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->buf_len = tmpl->len;	/* unpadded template length */
	ptr = skb->data + sizeof(*cmd);

	probe_info = ptr;
	/* len is reused from here on as the per-TLV length */
	len = sizeof(*probe_info);
	probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					    WMI_TAG_BCN_PRB_INFO) |
				 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}
  3130. int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
  3131. bool unsol_bcast_probe_resp_enabled)
  3132. {
  3133. struct sk_buff *skb;
  3134. int ret, len;
  3135. struct wmi_fils_discovery_cmd *cmd;
  3136. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  3137. "WMI vdev %i set %s interval to %u TU\n",
  3138. vdev_id, unsol_bcast_probe_resp_enabled ?
  3139. "unsolicited broadcast probe response" : "FILS discovery",
  3140. interval);
  3141. len = sizeof(*cmd);
  3142. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  3143. if (!skb)
  3144. return -ENOMEM;
  3145. cmd = (struct wmi_fils_discovery_cmd *)skb->data;
  3146. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) |
  3147. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  3148. cmd->vdev_id = vdev_id;
  3149. cmd->interval = interval;
  3150. cmd->config = unsol_bcast_probe_resp_enabled;
  3151. ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
  3152. if (ret) {
  3153. ath11k_warn(ar->ab,
  3154. "WMI vdev %i failed to send FILS discovery enable/disable command\n",
  3155. vdev_id);
  3156. dev_kfree_skb(skb);
  3157. }
  3158. return ret;
  3159. }
  3160. static void
  3161. ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb)
  3162. {
  3163. const void **tb;
  3164. const struct wmi_obss_color_collision_event *ev;
  3165. struct ath11k_vif *arvif;
  3166. int ret;
  3167. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  3168. if (IS_ERR(tb)) {
  3169. ret = PTR_ERR(tb);
  3170. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  3171. return;
  3172. }
  3173. rcu_read_lock();
  3174. ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
  3175. if (!ev) {
  3176. ath11k_warn(ab, "failed to fetch obss color collision ev");
  3177. goto exit;
  3178. }
  3179. arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
  3180. if (!arvif) {
  3181. ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n",
  3182. ev->vdev_id);
  3183. goto exit;
  3184. }
  3185. switch (ev->evt_type) {
  3186. case WMI_BSS_COLOR_COLLISION_DETECTION:
  3187. ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
  3188. GFP_KERNEL);
  3189. ath11k_dbg(ab, ATH11K_DBG_WMI,
  3190. "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
  3191. ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
  3192. break;
  3193. case WMI_BSS_COLOR_COLLISION_DISABLE:
  3194. case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
  3195. case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
  3196. break;
  3197. default:
  3198. ath11k_warn(ab, "received unknown obss color collision detection event\n");
  3199. }
  3200. exit:
  3201. kfree(tb);
  3202. rcu_read_unlock();
  3203. }
  3204. static void
  3205. ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
  3206. struct wmi_host_pdev_band_to_mac *band_to_mac)
  3207. {
  3208. u8 i;
  3209. struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
  3210. struct ath11k_pdev *pdev;
  3211. for (i = 0; i < soc->num_radios; i++) {
  3212. pdev = &soc->pdevs[i];
  3213. hal_reg_cap = &soc->hal_reg_cap[i];
  3214. band_to_mac[i].pdev_id = pdev->pdev_id;
  3215. switch (pdev->cap.supported_bands) {
  3216. case WMI_HOST_WLAN_2G_5G_CAP:
  3217. band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
  3218. band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
  3219. break;
  3220. case WMI_HOST_WLAN_2G_CAP:
  3221. band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
  3222. band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
  3223. break;
  3224. case WMI_HOST_WLAN_5G_CAP:
  3225. band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
  3226. band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
  3227. break;
  3228. default:
  3229. break;
  3230. }
  3231. }
  3232. }
/* Copy the host-side target_resource_config into the on-wire
 * wmi_resource_config layout used by WMI_INIT_CMDID. This is a
 * field-for-field copy; no values are transformed.
 */
static void
ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
				struct target_resource_config *tg_cfg)
{
	wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
	wmi_cfg->num_peers = tg_cfg->num_peers;
	wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
	wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
	wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
	wmi_cfg->num_tids = tg_cfg->num_tids;
	wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
	wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
	wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
	wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
	wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
	wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
	wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
	wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
	wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
	wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
	wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
	wmi_cfg->roam_offload_max_ap_profiles =
		tg_cfg->roam_offload_max_ap_profiles;
	wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
	wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
	wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
	wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
	wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
	wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
	wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
	wmi_cfg->vow_config = tg_cfg->vow_config;
	wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
	wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
	wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
	wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
	wmi_cfg->num_tdls_conn_table_entries =
		tg_cfg->num_tdls_conn_table_entries;
	wmi_cfg->beacon_tx_offload_max_vdev =
		tg_cfg->beacon_tx_offload_max_vdev;
	wmi_cfg->num_multicast_filter_entries =
		tg_cfg->num_multicast_filter_entries;
	wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
	wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
	wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		tg_cfg->max_tdls_concurrent_sleep_sta;
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		tg_cfg->max_tdls_concurrent_buffer_sta;
	wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
	wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
	wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
	wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
	wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
	wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
	wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
	wmi_cfg->flag1 = tg_cfg->flag1;
	wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
	wmi_cfg->sched_params = tg_cfg->sched_params;
	wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
	wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
}
  3296. static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
  3297. struct wmi_init_cmd_param *param)
  3298. {
  3299. struct ath11k_base *ab = wmi->wmi_ab->ab;
  3300. struct sk_buff *skb;
  3301. struct wmi_init_cmd *cmd;
  3302. struct wmi_resource_config *cfg;
  3303. struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
  3304. struct wmi_pdev_band_to_mac *band_to_mac;
  3305. struct wlan_host_mem_chunk *host_mem_chunks;
  3306. struct wmi_tlv *tlv;
  3307. size_t ret, len;
  3308. void *ptr;
  3309. u32 hw_mode_len = 0;
  3310. u16 idx;
  3311. if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
  3312. hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
  3313. (param->num_band_to_mac * sizeof(*band_to_mac));
  3314. len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
  3315. (param->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
  3316. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  3317. if (!skb)
  3318. return -ENOMEM;
  3319. cmd = (struct wmi_init_cmd *)skb->data;
  3320. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
  3321. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  3322. ptr = skb->data + sizeof(*cmd);
  3323. cfg = ptr;
  3324. ath11k_wmi_copy_resource_config(cfg, param->res_cfg);
  3325. cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
  3326. FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
  3327. ptr += sizeof(*cfg);
  3328. host_mem_chunks = ptr + TLV_HDR_SIZE;
  3329. len = sizeof(struct wlan_host_mem_chunk);
  3330. for (idx = 0; idx < param->num_mem_chunks; ++idx) {
  3331. host_mem_chunks[idx].tlv_header =
  3332. FIELD_PREP(WMI_TLV_TAG,
  3333. WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
  3334. FIELD_PREP(WMI_TLV_LEN, len);
  3335. host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
  3336. host_mem_chunks[idx].size = param->mem_chunks[idx].len;
  3337. host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
  3338. ath11k_dbg(ab, ATH11K_DBG_WMI,
  3339. "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
  3340. param->mem_chunks[idx].req_id,
  3341. (u64)param->mem_chunks[idx].paddr,
  3342. param->mem_chunks[idx].len);
  3343. }
  3344. cmd->num_host_mem_chunks = param->num_mem_chunks;
  3345. len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
  3346. /* num_mem_chunks is zero */
  3347. tlv = ptr;
  3348. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
  3349. FIELD_PREP(WMI_TLV_LEN, len);
  3350. ptr += TLV_HDR_SIZE + len;
  3351. if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
  3352. hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
  3353. hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  3354. WMI_TAG_PDEV_SET_HW_MODE_CMD) |
  3355. FIELD_PREP(WMI_TLV_LEN,
  3356. sizeof(*hw_mode) - TLV_HDR_SIZE);
  3357. hw_mode->hw_mode_index = param->hw_mode_id;
  3358. hw_mode->num_band_to_mac = param->num_band_to_mac;
  3359. ptr += sizeof(*hw_mode);
  3360. len = param->num_band_to_mac * sizeof(*band_to_mac);
  3361. tlv = ptr;
  3362. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
  3363. FIELD_PREP(WMI_TLV_LEN, len);
  3364. ptr += TLV_HDR_SIZE;
  3365. len = sizeof(*band_to_mac);
  3366. for (idx = 0; idx < param->num_band_to_mac; idx++) {
  3367. band_to_mac = (void *)ptr;
  3368. band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  3369. WMI_TAG_PDEV_BAND_TO_MAC) |
  3370. FIELD_PREP(WMI_TLV_LEN,
  3371. len - TLV_HDR_SIZE);
  3372. band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
  3373. band_to_mac->start_freq =
  3374. param->band_to_mac[idx].start_freq;
  3375. band_to_mac->end_freq =
  3376. param->band_to_mac[idx].end_freq;
  3377. ptr += sizeof(*band_to_mac);
  3378. }
  3379. }
  3380. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
  3381. if (ret) {
  3382. ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
  3383. dev_kfree_skb(skb);
  3384. }
  3385. return ret;
  3386. }
  3387. int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar,
  3388. int pdev_id)
  3389. {
  3390. struct ath11k_wmi_pdev_lro_config_cmd *cmd;
  3391. struct sk_buff *skb;
  3392. int ret;
  3393. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
  3394. if (!skb)
  3395. return -ENOMEM;
  3396. cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data;
  3397. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
  3398. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  3399. get_random_bytes(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE);
  3400. get_random_bytes(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE);
  3401. cmd->pdev_id = pdev_id;
  3402. ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
  3403. if (ret) {
  3404. ath11k_warn(ar->ab,
  3405. "failed to send lro cfg req wmi cmd\n");
  3406. goto err;
  3407. }
  3408. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  3409. "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
  3410. return 0;
  3411. err:
  3412. dev_kfree_skb(skb);
  3413. return ret;
  3414. }
  3415. int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
  3416. {
  3417. unsigned long time_left;
  3418. time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
  3419. WMI_SERVICE_READY_TIMEOUT_HZ);
  3420. if (!time_left)
  3421. return -ETIMEDOUT;
  3422. return 0;
  3423. }
  3424. int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
  3425. {
  3426. unsigned long time_left;
  3427. time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
  3428. WMI_SERVICE_READY_TIMEOUT_HZ);
  3429. if (!time_left)
  3430. return -ETIMEDOUT;
  3431. return 0;
  3432. }
  3433. int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
  3434. enum wmi_host_hw_mode_config_type mode)
  3435. {
  3436. struct wmi_pdev_set_hw_mode_cmd_param *cmd;
  3437. struct sk_buff *skb;
  3438. struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
  3439. int len;
  3440. int ret;
  3441. len = sizeof(*cmd);
  3442. skb = ath11k_wmi_alloc_skb(wmi_ab, len);
  3443. if (!skb)
  3444. return -ENOMEM;
  3445. cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
  3446. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
  3447. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  3448. cmd->pdev_id = WMI_PDEV_ID_SOC;
  3449. cmd->hw_mode_index = mode;
  3450. ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
  3451. if (ret) {
  3452. ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
  3453. dev_kfree_skb(skb);
  3454. }
  3455. return ret;
  3456. }
  3457. int ath11k_wmi_cmd_init(struct ath11k_base *ab)
  3458. {
  3459. struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
  3460. struct wmi_init_cmd_param init_param;
  3461. struct target_resource_config config;
  3462. memset(&init_param, 0, sizeof(init_param));
  3463. memset(&config, 0, sizeof(config));
  3464. ab->hw_params.hw_ops->wmi_init_config(ab, &config);
  3465. memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
  3466. init_param.res_cfg = &wmi_sc->wlan_resource_config;
  3467. init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
  3468. init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
  3469. init_param.mem_chunks = wmi_sc->mem_chunks;
  3470. if (ab->hw_params.single_pdev_only)
  3471. init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
  3472. init_param.num_band_to_mac = ab->num_radios;
  3473. ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
  3474. return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
  3475. }
  3476. int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
  3477. struct ath11k_wmi_vdev_spectral_conf_param *param)
  3478. {
  3479. struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
  3480. struct sk_buff *skb;
  3481. int ret;
  3482. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
  3483. if (!skb)
  3484. return -ENOMEM;
  3485. cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
  3486. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  3487. WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
  3488. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  3489. memcpy(&cmd->param, param, sizeof(*param));
  3490. ret = ath11k_wmi_cmd_send(ar->wmi, skb,
  3491. WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
  3492. if (ret) {
  3493. ath11k_warn(ar->ab,
  3494. "failed to send spectral scan config wmi cmd\n");
  3495. goto err;
  3496. }
  3497. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  3498. "WMI spectral scan config cmd vdev_id 0x%x\n",
  3499. param->vdev_id);
  3500. return 0;
  3501. err:
  3502. dev_kfree_skb(skb);
  3503. return ret;
  3504. }
  3505. int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
  3506. u32 trigger, u32 enable)
  3507. {
  3508. struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
  3509. struct sk_buff *skb;
  3510. int ret;
  3511. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
  3512. if (!skb)
  3513. return -ENOMEM;
  3514. cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;
  3515. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  3516. WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
  3517. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  3518. cmd->vdev_id = vdev_id;
  3519. cmd->trigger_cmd = trigger;
  3520. cmd->enable_cmd = enable;
  3521. ret = ath11k_wmi_cmd_send(ar->wmi, skb,
  3522. WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
  3523. if (ret) {
  3524. ath11k_warn(ar->ab,
  3525. "failed to send spectral enable wmi cmd\n");
  3526. goto err;
  3527. }
  3528. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  3529. "WMI spectral enable cmd vdev id 0x%x\n",
  3530. vdev_id);
  3531. return 0;
  3532. err:
  3533. dev_kfree_skb(skb);
  3534. return ret;
  3535. }
/* Configure a pdev DMA (direct buffer) ring from @param.
 * The command is a field-for-field copy of the caller's request
 * structure plus the TLV header. Frees the skb on send failure;
 * returns 0 or a negative error code.
 */
int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
				 struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = param->pdev_id;
	cmd->module_id = param->module_id;
	/* ring base and head/tail index addresses, split into lo/hi words */
	cmd->base_paddr_lo = param->base_paddr_lo;
	cmd->base_paddr_hi = param->base_paddr_hi;
	cmd->head_idx_paddr_lo = param->head_idx_paddr_lo;
	cmd->head_idx_paddr_hi = param->head_idx_paddr_hi;
	cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo;
	cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi;
	cmd->num_elems = param->num_elems;
	cmd->buf_size = param->buf_size;
	cmd->num_resp_per_event = param->num_resp_per_event;
	cmd->event_timeout_ms = param->event_timeout_ms;

	ret = ath11k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send dma ring cfg req wmi cmd\n");
		goto err;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
		   param->pdev_id);
	return 0;

err:
	dev_kfree_skb(skb);
	return ret;
}
  3575. static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
  3576. u16 tag, u16 len,
  3577. const void *ptr, void *data)
  3578. {
  3579. struct wmi_tlv_dma_buf_release_parse *parse = data;
  3580. if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
  3581. return -EPROTO;
  3582. if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
  3583. return -ENOBUFS;
  3584. parse->num_buf_entry++;
  3585. return 0;
  3586. }
  3587. static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
  3588. u16 tag, u16 len,
  3589. const void *ptr, void *data)
  3590. {
  3591. struct wmi_tlv_dma_buf_release_parse *parse = data;
  3592. if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
  3593. return -EPROTO;
  3594. if (parse->num_meta >= parse->fixed.num_meta_data_entry)
  3595. return -ENOBUFS;
  3596. parse->num_meta++;
  3597. return 0;
  3598. }
/* Top-level TLV iterator callback for the DMA buffer release event.
 *
 * The event carries one fixed header followed by two ARRAY_STRUCT TLVs
 * in order: buffer-release entries first, then meta-data entries. The
 * buf_entry_done/meta_data_done flags track which of the two arrays the
 * current ARRAY_STRUCT corresponds to.
 */
static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_dma_buf_release_parse *parse = data;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		memcpy(&parse->fixed, ptr,
		       sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
		/* translate the firmware pdev id to the host numbering */
		parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		if (!parse->buf_entry_done) {
			/* first ARRAY_STRUCT: buffer release entries */
			parse->num_buf_entry = 0;
			parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;

			ret = ath11k_wmi_tlv_iter(ab, ptr, len,
						  ath11k_wmi_tlv_dma_buf_entry_parse,
						  parse);
			if (ret) {
				ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
				return ret;
			}

			parse->buf_entry_done = true;
		} else if (!parse->meta_data_done) {
			/* second ARRAY_STRUCT: spectral meta data */
			parse->num_meta = 0;
			parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr;

			ret = ath11k_wmi_tlv_iter(ab, ptr, len,
						  ath11k_wmi_tlv_dma_buf_meta_parse,
						  parse);
			if (ret) {
				ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n",
					    ret);
				return ret;
			}

			parse->meta_data_done = true;
		}
		break;
	default:
		break;
	}
	return 0;
}
  3643. static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab,
  3644. struct sk_buff *skb)
  3645. {
  3646. struct wmi_tlv_dma_buf_release_parse parse = { };
  3647. struct ath11k_dbring_buf_release_event param;
  3648. int ret;
  3649. ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
  3650. ath11k_wmi_tlv_dma_buf_parse,
  3651. &parse);
  3652. if (ret) {
  3653. ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
  3654. return;
  3655. }
  3656. param.fixed = parse.fixed;
  3657. param.buf_entry = parse.buf_entry;
  3658. param.num_buf_entry = parse.num_buf_entry;
  3659. param.meta_data = parse.meta_data;
  3660. param.num_meta = parse.num_meta;
  3661. ret = ath11k_dbring_buffer_release_event(ab, &param);
  3662. if (ret) {
  3663. ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret);
  3664. return;
  3665. }
  3666. }
  3667. static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
  3668. u16 tag, u16 len,
  3669. const void *ptr, void *data)
  3670. {
  3671. struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
  3672. struct wmi_hw_mode_capabilities *hw_mode_cap;
  3673. u32 phy_map = 0;
  3674. if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
  3675. return -EPROTO;
  3676. if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
  3677. return -ENOBUFS;
  3678. hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
  3679. hw_mode_id);
  3680. svc_rdy_ext->n_hw_mode_caps++;
  3681. phy_map = hw_mode_cap->phy_id_map;
  3682. while (phy_map) {
  3683. svc_rdy_ext->tot_phy_id++;
  3684. phy_map = phy_map >> 1;
  3685. }
  3686. return 0;
  3687. }
  3688. static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
  3689. u16 len, const void *ptr, void *data)
  3690. {
  3691. struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
  3692. struct wmi_hw_mode_capabilities *hw_mode_caps;
  3693. enum wmi_host_hw_mode_config_type mode, pref;
  3694. u32 i;
  3695. int ret;
  3696. svc_rdy_ext->n_hw_mode_caps = 0;
  3697. svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
  3698. ret = ath11k_wmi_tlv_iter(soc, ptr, len,
  3699. ath11k_wmi_tlv_hw_mode_caps_parse,
  3700. svc_rdy_ext);
  3701. if (ret) {
  3702. ath11k_warn(soc, "failed to parse tlv %d\n", ret);
  3703. return ret;
  3704. }
  3705. i = 0;
  3706. while (i < svc_rdy_ext->n_hw_mode_caps) {
  3707. hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
  3708. mode = hw_mode_caps->hw_mode_id;
  3709. pref = soc->wmi_ab.preferred_hw_mode;
  3710. if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
  3711. svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
  3712. soc->wmi_ab.preferred_hw_mode = mode;
  3713. }
  3714. i++;
  3715. }
  3716. ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
  3717. soc->wmi_ab.preferred_hw_mode);
  3718. if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
  3719. return -EINVAL;
  3720. return 0;
  3721. }
  3722. static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
  3723. u16 tag, u16 len,
  3724. const void *ptr, void *data)
  3725. {
  3726. struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
  3727. if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
  3728. return -EPROTO;
  3729. if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
  3730. return -ENOBUFS;
  3731. len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
  3732. if (!svc_rdy_ext->n_mac_phy_caps) {
  3733. svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
  3734. len, GFP_ATOMIC);
  3735. if (!svc_rdy_ext->mac_phy_caps)
  3736. return -ENOMEM;
  3737. }
  3738. memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
  3739. svc_rdy_ext->n_mac_phy_caps++;
  3740. return 0;
  3741. }
  3742. static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
  3743. u16 tag, u16 len,
  3744. const void *ptr, void *data)
  3745. {
  3746. struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
  3747. if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
  3748. return -EPROTO;
  3749. if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
  3750. return -ENOBUFS;
  3751. svc_rdy_ext->n_ext_hal_reg_caps++;
  3752. return 0;
  3753. }
/* Parse the extended HAL regulatory capability array and store one
 * ath11k_hal_reg_capabilities_ext per PHY into soc->hal_reg_cap,
 * indexed by the phy_id reported in each extracted record.
 */
static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
					   u16 len, const void *ptr, void *data)
{
	struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath11k_hal_reg_capabilities_ext reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
	ret = ath11k_wmi_tlv_iter(soc, ptr, len,
				  ath11k_wmi_tlv_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath11k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
		ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath11k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}

		/* keyed by the record's own phy_id, not the loop index */
		memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
		       &reg_cap, sizeof(reg_cap));
	}
	return 0;
}
/* Parse the SOC HAL regulatory capability header and build one pdev per
 * PHY of the preferred hw mode (one bit in phy_id_map per PHY).
 * Single-pdev chips collapse everything into pdev 0.
 */
static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
						     u16 len, const void *ptr,
						     void *data)
{
	struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
	svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;

	soc->num_radios = 0;
	soc->target_pdev_count = 0;
	phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;

	/* one iteration per remaining PHY bit, capped at MAX_RADIOS */
	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext->hw_caps,
							    svc_rdy_ext->hw_mode_caps,
							    svc_rdy_ext->soc_hal_reg_caps,
							    svc_rdy_ext->mac_phy_caps,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For QCA6390, save mac_phy capability in the same pdev */
		if (soc->hw_params.single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	/* For QCA6390, set num_radios to 1 because host manages
	 * both 2G and 5G radio in one pdev.
	 * Set pdev_id = 0 and 0 means soc level.
	 */
	if (soc->hw_params.single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}
  3832. static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
  3833. u16 tag, u16 len,
  3834. const void *ptr, void *data)
  3835. {
  3836. struct wmi_tlv_dma_ring_caps_parse *parse = data;
  3837. if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
  3838. return -EPROTO;
  3839. parse->n_dma_ring_caps++;
  3840. return 0;
  3841. }
  3842. static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
  3843. u32 num_cap)
  3844. {
  3845. size_t sz;
  3846. void *ptr;
  3847. sz = num_cap * sizeof(struct ath11k_dbring_cap);
  3848. ptr = kzalloc(sz, GFP_ATOMIC);
  3849. if (!ptr)
  3850. return -ENOMEM;
  3851. ab->db_caps = ptr;
  3852. ab->num_db_cap = num_cap;
  3853. return 0;
  3854. }
/* Free the direct-buffer ring capability table and clear the pointer;
 * safe to call when no table was allocated (kfree(NULL) is a no-op).
 */
static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
}
/* Parse the DMA ring capability array into ab->db_caps.
 *
 * The array is processed at most once (a second occurrence is ignored
 * with a warning). Any entry with an out-of-range module id invalidates
 * the whole table, which is then freed.
 */
static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
					u16 len, const void *ptr, void *data)
{
	struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
	struct wmi_dma_ring_capabilities *dma_caps;
	struct ath11k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
	ret = ath11k_wmi_tlv_iter(ab, ptr, len,
				  ath11k_wmi_tlv_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	if (ab->num_db_cap) {
		ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}

	ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
			ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = dma_caps[i].module_id;
		/* firmware pdev id translated to host numbering */
		dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
		dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
		dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
		dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
	}

	return 0;

free_dir_buff:
	ath11k_wmi_free_dbring_caps(ab);
	return ret;
}
/* TLV-iterator callback for WMI_SERVICE_READY_EXT_EVENT.
 *
 * The event carries several WMI_TAG_ARRAY_STRUCT arrays that are only
 * distinguishable by their position in the message, so the *_done flags
 * in @data track which array is expected next. The order below (hw mode,
 * mac/phy, ext hal reg, chainmask combo, chainmask cap, oem dma ring,
 * dma ring) mirrors the firmware message layout and must not be changed.
 *
 * Returns 0 to continue iteration, negative errno to abort.
 */
static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
					    u16 tag, u16 len,
					    const void *ptr, void *data)
{
	struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		/* Fixed-size header of the event. */
		ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->param);
		if (ret) {
			ath11k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		/* Keep a pointer into the event buffer; it stays valid for
		 * the duration of the iteration.
		 */
		svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
		svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
								svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		/* Positional dispatch: see ordering note above. */
		if (!svc_rdy_ext->hw_mode_done) {
			ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
							  svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = ath11k_wmi_tlv_iter(ab, ptr, len,
						  ath11k_wmi_tlv_mac_phy_caps_parse,
						  svc_rdy_ext);
			if (ret) {
				ath11k_warn(ab, "failed to parse tlv %d\n", ret);
				return ret;
			}

			svc_rdy_ext->mac_phy_done = true;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
							      svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			/* Not consumed by the host; just skip over it. */
			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			/* Not consumed by the host; just skip over it. */
			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			/* Not consumed by the host; just skip over it. */
			svc_rdy_ext->oem_dma_ring_cap_done = true;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
							   &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = true;
		}
		break;

	default:
		break;
	}

	return 0;
}
  3972. static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
  3973. struct sk_buff *skb)
  3974. {
  3975. struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
  3976. int ret;
  3977. ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
  3978. ath11k_wmi_tlv_svc_rdy_ext_parse,
  3979. &svc_rdy_ext);
  3980. if (ret) {
  3981. ath11k_warn(ab, "failed to parse tlv %d\n", ret);
  3982. goto err;
  3983. }
  3984. if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
  3985. complete(&ab->wmi_ab.service_ready);
  3986. kfree(svc_rdy_ext.mac_phy_caps);
  3987. return 0;
  3988. err:
  3989. ath11k_wmi_free_dbring_caps(ab);
  3990. return ret;
  3991. }
  3992. static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab,
  3993. u16 tag, u16 len,
  3994. const void *ptr, void *data)
  3995. {
  3996. struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
  3997. int ret;
  3998. switch (tag) {
  3999. case WMI_TAG_ARRAY_STRUCT:
  4000. if (!parse->dma_ring_cap_done) {
  4001. ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
  4002. &parse->dma_caps_parse);
  4003. if (ret)
  4004. return ret;
  4005. parse->dma_ring_cap_done = true;
  4006. }
  4007. break;
  4008. default:
  4009. break;
  4010. }
  4011. return 0;
  4012. }
  4013. static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
  4014. struct sk_buff *skb)
  4015. {
  4016. struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
  4017. int ret;
  4018. ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
  4019. ath11k_wmi_tlv_svc_rdy_ext2_parse,
  4020. &svc_rdy_ext2);
  4021. if (ret) {
  4022. ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
  4023. goto err;
  4024. }
  4025. complete(&ab->wmi_ab.service_ready);
  4026. return 0;
  4027. err:
  4028. ath11k_wmi_free_dbring_caps(ab);
  4029. return ret;
  4030. }
  4031. static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
  4032. struct wmi_vdev_start_resp_event *vdev_rsp)
  4033. {
  4034. const void **tb;
  4035. const struct wmi_vdev_start_resp_event *ev;
  4036. int ret;
  4037. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4038. if (IS_ERR(tb)) {
  4039. ret = PTR_ERR(tb);
  4040. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4041. return ret;
  4042. }
  4043. ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
  4044. if (!ev) {
  4045. ath11k_warn(ab, "failed to fetch vdev start resp ev");
  4046. kfree(tb);
  4047. return -EPROTO;
  4048. }
  4049. memset(vdev_rsp, 0, sizeof(*vdev_rsp));
  4050. vdev_rsp->vdev_id = ev->vdev_id;
  4051. vdev_rsp->requestor_id = ev->requestor_id;
  4052. vdev_rsp->resp_type = ev->resp_type;
  4053. vdev_rsp->status = ev->status;
  4054. vdev_rsp->chain_mask = ev->chain_mask;
  4055. vdev_rsp->smps_mode = ev->smps_mode;
  4056. vdev_rsp->mac_id = ev->mac_id;
  4057. vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
  4058. vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
  4059. kfree(tb);
  4060. return 0;
  4061. }
  4062. static struct cur_reg_rule
  4063. *create_reg_rules_from_wmi(u32 num_reg_rules,
  4064. struct wmi_regulatory_rule_struct *wmi_reg_rule)
  4065. {
  4066. struct cur_reg_rule *reg_rule_ptr;
  4067. u32 count;
  4068. reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
  4069. GFP_ATOMIC);
  4070. if (!reg_rule_ptr)
  4071. return NULL;
  4072. for (count = 0; count < num_reg_rules; count++) {
  4073. reg_rule_ptr[count].start_freq =
  4074. FIELD_GET(REG_RULE_START_FREQ,
  4075. wmi_reg_rule[count].freq_info);
  4076. reg_rule_ptr[count].end_freq =
  4077. FIELD_GET(REG_RULE_END_FREQ,
  4078. wmi_reg_rule[count].freq_info);
  4079. reg_rule_ptr[count].max_bw =
  4080. FIELD_GET(REG_RULE_MAX_BW,
  4081. wmi_reg_rule[count].bw_pwr_info);
  4082. reg_rule_ptr[count].reg_power =
  4083. FIELD_GET(REG_RULE_REG_PWR,
  4084. wmi_reg_rule[count].bw_pwr_info);
  4085. reg_rule_ptr[count].ant_gain =
  4086. FIELD_GET(REG_RULE_ANT_GAIN,
  4087. wmi_reg_rule[count].bw_pwr_info);
  4088. reg_rule_ptr[count].flags =
  4089. FIELD_GET(REG_RULE_FLAGS,
  4090. wmi_reg_rule[count].flag_info);
  4091. }
  4092. return reg_rule_ptr;
  4093. }
/* Parse a WMI_REG_CHAN_LIST_CC_EVENT into @reg_info: country/regdomain
 * metadata plus freshly allocated 2 GHz and 5 GHz rule arrays.
 *
 * NOTE(review): on a 5 GHz allocation failure the already-allocated
 * reg_rules_2g_ptr is left in @reg_info — presumably the caller frees
 * both rule arrays on error; verify against the caller.
 *
 * Returns 0 on success, negative errno on parse/allocation failure.
 */
static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
					       struct sk_buff *skb,
					       struct cur_regulatory_info *reg_info)
{
	const void **tb;
	const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
	struct wmi_regulatory_rule_struct *wmi_reg_rule;
	u32 num_2g_reg_rules, num_5g_reg_rules;
	int ret;

	ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
	if (!chan_list_event_hdr) {
		ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
		kfree(tb);
		return -EPROTO;
	}

	reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules;
	reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules;

	/* An event with zero rules in both bands is useless. */
	if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) {
		ath11k_warn(ab, "No regulatory rules available in the event info\n");
		kfree(tb);
		return -EINVAL;
	}

	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
	       REG_ALPHA2_LEN);
	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
	reg_info->num_phy = chan_list_event_hdr->num_phy;
	reg_info->phy_id = chan_list_event_hdr->phy_id;
	reg_info->ctry_code = chan_list_event_hdr->country_id;
	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;

	/* Map the WMI status code onto the driver-internal enum; unknown
	 * codes leave reg_info->status_code untouched.
	 */
	if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS)
		reg_info->status_code = REG_SET_CC_STATUS_PASS;
	else if (chan_list_event_hdr->status_code == WMI_REG_CURRENT_ALPHA2_NOT_FOUND)
		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
	else if (chan_list_event_hdr->status_code == WMI_REG_INIT_ALPHA2_NOT_FOUND)
		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
	else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_CHANGE_NOT_ALLOWED)
		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
	else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_NO_MEMORY)
		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
	else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_FAIL)
		reg_info->status_code = REG_SET_CC_STATUS_FAIL;

	reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
	reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
	reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
	reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g;

	num_2g_reg_rules = reg_info->num_2g_reg_rules;
	num_5g_reg_rules = reg_info->num_5g_reg_rules;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
		   __func__, reg_info->alpha2, reg_info->dfs_region,
		   reg_info->min_bw_2g, reg_info->max_bw_2g,
		   reg_info->min_bw_5g, reg_info->max_bw_5g);

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__,
		   num_2g_reg_rules, num_5g_reg_rules);

	/* The rule array follows the event header and its TLV header.
	 * 2 GHz rules come first, immediately followed by 5 GHz rules.
	 */
	wmi_reg_rule =
		(struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
						      + sizeof(*chan_list_event_hdr)
						      + sizeof(struct wmi_tlv));

	if (num_2g_reg_rules) {
		reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules,
								       wmi_reg_rule);
		if (!reg_info->reg_rules_2g_ptr) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 2g rules\n");
			return -ENOMEM;
		}
	}

	if (num_5g_reg_rules) {
		wmi_reg_rule += num_2g_reg_rules;
		reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules,
								       wmi_reg_rule);
		if (!reg_info->reg_rules_5g_ptr) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 5g rules\n");
			return -ENOMEM;
		}
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");

	kfree(tb);
	return 0;
}
  4184. static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
  4185. struct wmi_peer_delete_resp_event *peer_del_resp)
  4186. {
  4187. const void **tb;
  4188. const struct wmi_peer_delete_resp_event *ev;
  4189. int ret;
  4190. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4191. if (IS_ERR(tb)) {
  4192. ret = PTR_ERR(tb);
  4193. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4194. return ret;
  4195. }
  4196. ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
  4197. if (!ev) {
  4198. ath11k_warn(ab, "failed to fetch peer delete resp ev");
  4199. kfree(tb);
  4200. return -EPROTO;
  4201. }
  4202. memset(peer_del_resp, 0, sizeof(*peer_del_resp));
  4203. peer_del_resp->vdev_id = ev->vdev_id;
  4204. ether_addr_copy(peer_del_resp->peer_macaddr.addr,
  4205. ev->peer_macaddr.addr);
  4206. kfree(tb);
  4207. return 0;
  4208. }
  4209. static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
  4210. struct sk_buff *skb,
  4211. u32 *vdev_id)
  4212. {
  4213. const void **tb;
  4214. const struct wmi_vdev_delete_resp_event *ev;
  4215. int ret;
  4216. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4217. if (IS_ERR(tb)) {
  4218. ret = PTR_ERR(tb);
  4219. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4220. return ret;
  4221. }
  4222. ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
  4223. if (!ev) {
  4224. ath11k_warn(ab, "failed to fetch vdev delete resp ev");
  4225. kfree(tb);
  4226. return -EPROTO;
  4227. }
  4228. *vdev_id = ev->vdev_id;
  4229. kfree(tb);
  4230. return 0;
  4231. }
  4232. static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
  4233. u32 len, u32 *vdev_id,
  4234. u32 *tx_status)
  4235. {
  4236. const void **tb;
  4237. const struct wmi_bcn_tx_status_event *ev;
  4238. int ret;
  4239. tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
  4240. if (IS_ERR(tb)) {
  4241. ret = PTR_ERR(tb);
  4242. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4243. return ret;
  4244. }
  4245. ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
  4246. if (!ev) {
  4247. ath11k_warn(ab, "failed to fetch bcn tx status ev");
  4248. kfree(tb);
  4249. return -EPROTO;
  4250. }
  4251. *vdev_id = ev->vdev_id;
  4252. *tx_status = ev->tx_status;
  4253. kfree(tb);
  4254. return 0;
  4255. }
  4256. static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
  4257. u32 *vdev_id)
  4258. {
  4259. const void **tb;
  4260. const struct wmi_vdev_stopped_event *ev;
  4261. int ret;
  4262. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4263. if (IS_ERR(tb)) {
  4264. ret = PTR_ERR(tb);
  4265. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4266. return ret;
  4267. }
  4268. ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
  4269. if (!ev) {
  4270. ath11k_warn(ab, "failed to fetch vdev stop ev");
  4271. kfree(tb);
  4272. return -EPROTO;
  4273. }
  4274. *vdev_id = ev->vdev_id;
  4275. kfree(tb);
  4276. return 0;
  4277. }
/* Extract the mgmt rx header TLV into @hdr and rewrite @skb in place so
 * that its data starts at the management frame payload.
 *
 * Returns 0 on success, negative errno on parse/validation failure.
 * On failure @skb is left untouched.
 */
static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
					  struct sk_buff *skb,
					  struct mgmt_rx_event_params *hdr)
{
	const void **tb;
	const struct wmi_mgmt_rx_hdr *ev;
	const u8 *frame;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	/* Two TLVs are required: the fixed header and the frame bytes. */
	ev = tb[WMI_TAG_MGMT_RX_HDR];
	frame = tb[WMI_TAG_ARRAY_BYTE];

	if (!ev || !frame) {
		ath11k_warn(ab, "failed to fetch mgmt rx hdr");
		kfree(tb);
		return -EPROTO;
	}

	hdr->pdev_id = ev->pdev_id;
	hdr->chan_freq = ev->chan_freq;
	hdr->channel = ev->channel;
	hdr->snr = ev->snr;
	hdr->rate = ev->rate;
	hdr->phy_mode = ev->phy_mode;
	hdr->buf_len = ev->buf_len;
	hdr->status = ev->status;
	hdr->flags = ev->flags;
	hdr->rssi = ev->rssi;
	hdr->tsf_delta = ev->tsf_delta;
	memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));

	/* Firmware-reported frame length must fit in what we received. */
	if (skb->len < (frame - skb->data) + hdr->buf_len) {
		ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	/* The trim/put/pull/put sequence below is order-sensitive: it
	 * first clips the skb to the frame start, then advances data to
	 * `frame`, then extends it by the payload length.
	 */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, hdr->buf_len);
	/* NOTE(review): byte swap applied unconditionally here — presumably
	 * a no-op on targets that don't need it; confirm against
	 * ath11k_ce_byte_swap().
	 */
	ath11k_ce_byte_swap(skb->data, hdr->buf_len);

	kfree(tb);
	return 0;
}
/* Complete a management-frame transmission: look up the skb by its
 * descriptor id, release its DMA mapping, report tx status to mac80211
 * and update the pending-mgmt accounting.
 *
 * @status: firmware tx status; 0 means the frame was acked.
 * Returns 0 on success, -ENOENT if @desc_id is not in the idr.
 */
static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
				    u32 status)
{
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	int num_mgmt;

	/* Lookup and removal must both happen under txmgmt_idr_lock so a
	 * concurrent completion cannot grab the same descriptor.
	 */
	spin_lock_bh(&ar->txmgmt_idr_lock);
	msdu = idr_find(&ar->txmgmt_idr, desc_id);

	if (!msdu) {
		ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
			    desc_id);
		spin_unlock_bh(&ar->txmgmt_idr_lock);
		return -ENOENT;
	}

	idr_remove(&ar->txmgmt_idr, desc_id);
	spin_unlock_bh(&ar->txmgmt_idr_lock);

	skb_cb = ATH11K_SKB_CB(msdu);
	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	info = IEEE80211_SKB_CB(msdu);
	/* Report ACK only for frames that expected one and succeeded. */
	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
		info->flags |= IEEE80211_TX_STAT_ACK;

	/* Hands the skb (and its ownership) back to mac80211. */
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);

	/* WARN when we received this event without doing any mgmt tx */
	if (num_mgmt < 0)
		WARN_ON_ONCE(1);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi mgmt tx comp pending %d desc id %d\n",
		   num_mgmt, desc_id);

	/* Last outstanding frame: wake anyone flushing the mgmt queue. */
	if (!num_mgmt)
		wake_up(&ar->txmgmt_empty_waitq);

	return 0;
}
  4359. static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
  4360. struct sk_buff *skb,
  4361. struct wmi_mgmt_tx_compl_event *param)
  4362. {
  4363. const void **tb;
  4364. const struct wmi_mgmt_tx_compl_event *ev;
  4365. int ret;
  4366. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4367. if (IS_ERR(tb)) {
  4368. ret = PTR_ERR(tb);
  4369. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4370. return ret;
  4371. }
  4372. ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
  4373. if (!ev) {
  4374. ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
  4375. kfree(tb);
  4376. return -EPROTO;
  4377. }
  4378. param->pdev_id = ev->pdev_id;
  4379. param->desc_id = ev->desc_id;
  4380. param->status = ev->status;
  4381. kfree(tb);
  4382. return 0;
  4383. }
  4384. static void ath11k_wmi_event_scan_started(struct ath11k *ar)
  4385. {
  4386. lockdep_assert_held(&ar->data_lock);
  4387. switch (ar->scan.state) {
  4388. case ATH11K_SCAN_IDLE:
  4389. case ATH11K_SCAN_RUNNING:
  4390. case ATH11K_SCAN_ABORTING:
  4391. ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
  4392. ath11k_scan_state_str(ar->scan.state),
  4393. ar->scan.state);
  4394. break;
  4395. case ATH11K_SCAN_STARTING:
  4396. ar->scan.state = ATH11K_SCAN_RUNNING;
  4397. if (ar->scan.is_roc)
  4398. ieee80211_ready_on_channel(ar->hw);
  4399. complete(&ar->scan.started);
  4400. break;
  4401. }
  4402. }
  4403. static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
  4404. {
  4405. lockdep_assert_held(&ar->data_lock);
  4406. switch (ar->scan.state) {
  4407. case ATH11K_SCAN_IDLE:
  4408. case ATH11K_SCAN_RUNNING:
  4409. case ATH11K_SCAN_ABORTING:
  4410. ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
  4411. ath11k_scan_state_str(ar->scan.state),
  4412. ar->scan.state);
  4413. break;
  4414. case ATH11K_SCAN_STARTING:
  4415. complete(&ar->scan.started);
  4416. __ath11k_mac_scan_finish(ar);
  4417. break;
  4418. }
  4419. }
  4420. static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
  4421. {
  4422. lockdep_assert_held(&ar->data_lock);
  4423. switch (ar->scan.state) {
  4424. case ATH11K_SCAN_IDLE:
  4425. case ATH11K_SCAN_STARTING:
  4426. /* One suspected reason scan can be completed while starting is
  4427. * if firmware fails to deliver all scan events to the host,
  4428. * e.g. when transport pipe is full. This has been observed
  4429. * with spectral scan phyerr events starving wmi transport
  4430. * pipe. In such case the "scan completed" event should be (and
  4431. * is) ignored by the host as it may be just firmware's scan
  4432. * state machine recovering.
  4433. */
  4434. ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
  4435. ath11k_scan_state_str(ar->scan.state),
  4436. ar->scan.state);
  4437. break;
  4438. case ATH11K_SCAN_RUNNING:
  4439. case ATH11K_SCAN_ABORTING:
  4440. __ath11k_mac_scan_finish(ar);
  4441. break;
  4442. }
  4443. }
  4444. static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
  4445. {
  4446. lockdep_assert_held(&ar->data_lock);
  4447. switch (ar->scan.state) {
  4448. case ATH11K_SCAN_IDLE:
  4449. case ATH11K_SCAN_STARTING:
  4450. ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
  4451. ath11k_scan_state_str(ar->scan.state),
  4452. ar->scan.state);
  4453. break;
  4454. case ATH11K_SCAN_RUNNING:
  4455. case ATH11K_SCAN_ABORTING:
  4456. ar->scan_channel = NULL;
  4457. break;
  4458. }
  4459. }
  4460. static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
  4461. {
  4462. lockdep_assert_held(&ar->data_lock);
  4463. switch (ar->scan.state) {
  4464. case ATH11K_SCAN_IDLE:
  4465. case ATH11K_SCAN_STARTING:
  4466. ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
  4467. ath11k_scan_state_str(ar->scan.state),
  4468. ar->scan.state);
  4469. break;
  4470. case ATH11K_SCAN_RUNNING:
  4471. case ATH11K_SCAN_ABORTING:
  4472. ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
  4473. if (ar->scan.is_roc && ar->scan.roc_freq == freq)
  4474. complete(&ar->scan.on_channel);
  4475. break;
  4476. }
  4477. }
  4478. static const char *
  4479. ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
  4480. enum wmi_scan_completion_reason reason)
  4481. {
  4482. switch (type) {
  4483. case WMI_SCAN_EVENT_STARTED:
  4484. return "started";
  4485. case WMI_SCAN_EVENT_COMPLETED:
  4486. switch (reason) {
  4487. case WMI_SCAN_REASON_COMPLETED:
  4488. return "completed";
  4489. case WMI_SCAN_REASON_CANCELLED:
  4490. return "completed [cancelled]";
  4491. case WMI_SCAN_REASON_PREEMPTED:
  4492. return "completed [preempted]";
  4493. case WMI_SCAN_REASON_TIMEDOUT:
  4494. return "completed [timedout]";
  4495. case WMI_SCAN_REASON_INTERNAL_FAILURE:
  4496. return "completed [internal err]";
  4497. case WMI_SCAN_REASON_MAX:
  4498. break;
  4499. }
  4500. return "completed [unknown]";
  4501. case WMI_SCAN_EVENT_BSS_CHANNEL:
  4502. return "bss channel";
  4503. case WMI_SCAN_EVENT_FOREIGN_CHAN:
  4504. return "foreign channel";
  4505. case WMI_SCAN_EVENT_DEQUEUED:
  4506. return "dequeued";
  4507. case WMI_SCAN_EVENT_PREEMPTED:
  4508. return "preempted";
  4509. case WMI_SCAN_EVENT_START_FAILED:
  4510. return "start failed";
  4511. case WMI_SCAN_EVENT_RESTARTED:
  4512. return "restarted";
  4513. case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
  4514. return "foreign channel exit";
  4515. default:
  4516. return "unknown";
  4517. }
  4518. }
  4519. static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
  4520. struct wmi_scan_event *scan_evt_param)
  4521. {
  4522. const void **tb;
  4523. const struct wmi_scan_event *ev;
  4524. int ret;
  4525. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4526. if (IS_ERR(tb)) {
  4527. ret = PTR_ERR(tb);
  4528. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4529. return ret;
  4530. }
  4531. ev = tb[WMI_TAG_SCAN_EVENT];
  4532. if (!ev) {
  4533. ath11k_warn(ab, "failed to fetch scan ev");
  4534. kfree(tb);
  4535. return -EPROTO;
  4536. }
  4537. scan_evt_param->event_type = ev->event_type;
  4538. scan_evt_param->reason = ev->reason;
  4539. scan_evt_param->channel_freq = ev->channel_freq;
  4540. scan_evt_param->scan_req_id = ev->scan_req_id;
  4541. scan_evt_param->scan_id = ev->scan_id;
  4542. scan_evt_param->vdev_id = ev->vdev_id;
  4543. scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
  4544. kfree(tb);
  4545. return 0;
  4546. }
  4547. static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
  4548. struct wmi_peer_sta_kickout_arg *arg)
  4549. {
  4550. const void **tb;
  4551. const struct wmi_peer_sta_kickout_event *ev;
  4552. int ret;
  4553. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4554. if (IS_ERR(tb)) {
  4555. ret = PTR_ERR(tb);
  4556. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4557. return ret;
  4558. }
  4559. ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
  4560. if (!ev) {
  4561. ath11k_warn(ab, "failed to fetch peer sta kickout ev");
  4562. kfree(tb);
  4563. return -EPROTO;
  4564. }
  4565. arg->mac_addr = ev->peer_macaddr.addr;
  4566. kfree(tb);
  4567. return 0;
  4568. }
  4569. static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
  4570. struct wmi_roam_event *roam_ev)
  4571. {
  4572. const void **tb;
  4573. const struct wmi_roam_event *ev;
  4574. int ret;
  4575. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4576. if (IS_ERR(tb)) {
  4577. ret = PTR_ERR(tb);
  4578. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4579. return ret;
  4580. }
  4581. ev = tb[WMI_TAG_ROAM_EVENT];
  4582. if (!ev) {
  4583. ath11k_warn(ab, "failed to fetch roam ev");
  4584. kfree(tb);
  4585. return -EPROTO;
  4586. }
  4587. roam_ev->vdev_id = ev->vdev_id;
  4588. roam_ev->reason = ev->reason;
  4589. roam_ev->rssi = ev->rssi;
  4590. kfree(tb);
  4591. return 0;
  4592. }
  4593. static int freq_to_idx(struct ath11k *ar, int freq)
  4594. {
  4595. struct ieee80211_supported_band *sband;
  4596. int band, ch, idx = 0;
  4597. for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
  4598. sband = ar->hw->wiphy->bands[band];
  4599. if (!sband)
  4600. continue;
  4601. for (ch = 0; ch < sband->n_channels; ch++, idx++)
  4602. if (sband->channels[ch].center_freq == freq)
  4603. goto exit;
  4604. }
  4605. exit:
  4606. return idx;
  4607. }
  4608. static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
  4609. u32 len, struct wmi_chan_info_event *ch_info_ev)
  4610. {
  4611. const void **tb;
  4612. const struct wmi_chan_info_event *ev;
  4613. int ret;
  4614. tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
  4615. if (IS_ERR(tb)) {
  4616. ret = PTR_ERR(tb);
  4617. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4618. return ret;
  4619. }
  4620. ev = tb[WMI_TAG_CHAN_INFO_EVENT];
  4621. if (!ev) {
  4622. ath11k_warn(ab, "failed to fetch chan info ev");
  4623. kfree(tb);
  4624. return -EPROTO;
  4625. }
  4626. ch_info_ev->err_code = ev->err_code;
  4627. ch_info_ev->freq = ev->freq;
  4628. ch_info_ev->cmd_flags = ev->cmd_flags;
  4629. ch_info_ev->noise_floor = ev->noise_floor;
  4630. ch_info_ev->rx_clear_count = ev->rx_clear_count;
  4631. ch_info_ev->cycle_count = ev->cycle_count;
  4632. ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
  4633. ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
  4634. ch_info_ev->rx_frame_count = ev->rx_frame_count;
  4635. ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
  4636. ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
  4637. ch_info_ev->vdev_id = ev->vdev_id;
  4638. kfree(tb);
  4639. return 0;
  4640. }
  4641. static int
  4642. ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
  4643. struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
  4644. {
  4645. const void **tb;
  4646. const struct wmi_pdev_bss_chan_info_event *ev;
  4647. int ret;
  4648. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4649. if (IS_ERR(tb)) {
  4650. ret = PTR_ERR(tb);
  4651. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4652. return ret;
  4653. }
  4654. ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
  4655. if (!ev) {
  4656. ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
  4657. kfree(tb);
  4658. return -EPROTO;
  4659. }
  4660. bss_ch_info_ev->pdev_id = ev->pdev_id;
  4661. bss_ch_info_ev->freq = ev->freq;
  4662. bss_ch_info_ev->noise_floor = ev->noise_floor;
  4663. bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
  4664. bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
  4665. bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
  4666. bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
  4667. bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
  4668. bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
  4669. bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
  4670. bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
  4671. bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
  4672. bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
  4673. kfree(tb);
  4674. return 0;
  4675. }
  4676. static int
  4677. ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
  4678. struct wmi_vdev_install_key_complete_arg *arg)
  4679. {
  4680. const void **tb;
  4681. const struct wmi_vdev_install_key_compl_event *ev;
  4682. int ret;
  4683. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4684. if (IS_ERR(tb)) {
  4685. ret = PTR_ERR(tb);
  4686. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4687. return ret;
  4688. }
  4689. ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
  4690. if (!ev) {
  4691. ath11k_warn(ab, "failed to fetch vdev install key compl ev");
  4692. kfree(tb);
  4693. return -EPROTO;
  4694. }
  4695. arg->vdev_id = ev->vdev_id;
  4696. arg->macaddr = ev->peer_macaddr.addr;
  4697. arg->key_idx = ev->key_idx;
  4698. arg->key_flags = ev->key_flags;
  4699. arg->status = ev->status;
  4700. kfree(tb);
  4701. return 0;
  4702. }
  4703. static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
  4704. struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
  4705. {
  4706. const void **tb;
  4707. const struct wmi_peer_assoc_conf_event *ev;
  4708. int ret;
  4709. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  4710. if (IS_ERR(tb)) {
  4711. ret = PTR_ERR(tb);
  4712. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  4713. return ret;
  4714. }
  4715. ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
  4716. if (!ev) {
  4717. ath11k_warn(ab, "failed to fetch peer assoc conf ev");
  4718. kfree(tb);
  4719. return -EPROTO;
  4720. }
  4721. peer_assoc_conf->vdev_id = ev->vdev_id;
  4722. peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
  4723. kfree(tb);
  4724. return 0;
  4725. }
/* Copy the base pdev stats from the WMI layout (@src) into the driver's
 * fw-stats structure (@dst). Pure field-for-field mapping; note the
 * slight renames (chan_nf -> ch_noise_floor, chan_tx_pwr ->
 * chan_tx_power).
 */
static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
					    struct ath11k_fw_stats_pdev *dst)
{
	dst->ch_noise_floor = src->chan_nf;
	dst->tx_frame_count = src->tx_frame_count;
	dst->rx_frame_count = src->rx_frame_count;
	dst->rx_clear_count = src->rx_clear_count;
	dst->cycle_count = src->cycle_count;
	dst->phy_err_count = src->phy_err_count;
	dst->chan_tx_power = src->chan_tx_pwr;
}
/* Copy the TX portion of firmware pdev stats into the driver's accumulated
 * representation. Straight field-for-field copy; no conversion.
 */
static void
ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
			      struct ath11k_fw_stats_pdev *dst)
{
	dst->comp_queued = src->comp_queued;
	dst->comp_delivered = src->comp_delivered;
	dst->msdu_enqued = src->msdu_enqued;
	dst->mpdu_enqued = src->mpdu_enqued;
	dst->wmm_drop = src->wmm_drop;
	dst->local_enqued = src->local_enqued;
	dst->local_freed = src->local_freed;
	dst->hw_queued = src->hw_queued;
	dst->hw_reaped = src->hw_reaped;
	dst->underrun = src->underrun;
	dst->hw_paused = src->hw_paused;
	dst->tx_abort = src->tx_abort;
	dst->mpdus_requeued = src->mpdus_requeued;
	dst->tx_ko = src->tx_ko;
	dst->tx_xretry = src->tx_xretry;
	dst->data_rc = src->data_rc;
	dst->self_triggers = src->self_triggers;
	dst->sw_retry_failure = src->sw_retry_failure;
	dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
	dst->pdev_cont_xretry = src->pdev_cont_xretry;
	dst->pdev_tx_timeout = src->pdev_tx_timeout;
	dst->pdev_resets = src->pdev_resets;
	dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
	dst->phy_underrun = src->phy_underrun;
	dst->txop_ovf = src->txop_ovf;
	dst->seq_posted = src->seq_posted;
	dst->seq_failed_queueing = src->seq_failed_queueing;
	dst->seq_completed = src->seq_completed;
	dst->seq_restarted = src->seq_restarted;
	dst->mu_seq_posted = src->mu_seq_posted;
	dst->mpdus_sw_flush = src->mpdus_sw_flush;
	dst->mpdus_hw_filter = src->mpdus_hw_filter;
	dst->mpdus_truncated = src->mpdus_truncated;
	dst->mpdus_ack_failed = src->mpdus_ack_failed;
	dst->mpdus_expired = src->mpdus_expired;
}
/* Copy the RX portion of firmware pdev stats into the driver's accumulated
 * representation. Straight field-for-field copy; no conversion.
 */
static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
					  struct ath11k_fw_stats_pdev *dst)
{
	dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
	dst->status_rcvd = src->status_rcvd;
	dst->r0_frags = src->r0_frags;
	dst->r1_frags = src->r1_frags;
	dst->r2_frags = src->r2_frags;
	dst->r3_frags = src->r3_frags;
	dst->htt_msdus = src->htt_msdus;
	dst->htt_mpdus = src->htt_mpdus;
	dst->loc_msdus = src->loc_msdus;
	dst->loc_mpdus = src->loc_mpdus;
	dst->oversize_amsdu = src->oversize_amsdu;
	dst->phy_errs = src->phy_errs;
	dst->phy_err_drop = src->phy_err_drop;
	dst->mpdu_errs = src->mpdu_errs;
	dst->rx_ovfl_errs = src->rx_ovfl_errs;
}
/* Copy firmware per-vdev stats into the driver representation, including the
 * per-AC tx counters and the per-entry rate/RSSI history arrays.
 */
static void
ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
			   struct ath11k_fw_stats_vdev *dst)
{
	int i;

	dst->vdev_id = src->vdev_id;
	dst->beacon_snr = src->beacon_snr;
	dst->data_snr = src->data_snr;
	dst->num_rx_frames = src->num_rx_frames;
	dst->num_rts_fail = src->num_rts_fail;
	dst->num_rts_success = src->num_rts_success;
	dst->num_rx_err = src->num_rx_err;
	dst->num_rx_discard = src->num_rx_discard;
	dst->num_tx_not_acked = src->num_tx_not_acked;

	/* Array sizes are taken from the source struct so a shorter
	 * destination array would be a compile-visible mismatch.
	 */
	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
		dst->num_tx_frames[i] = src->num_tx_frames[i];

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
		dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
		dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];

	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
		dst->tx_rate_history[i] = src->tx_rate_history[i];

	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
		dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
}
/* Copy firmware per-vdev beacon tx stats into the driver representation. */
static void
ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
			  struct ath11k_fw_stats_bcn *dst)
{
	dst->vdev_id = src->vdev_id;
	dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
	dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
}
  4829. static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
  4830. u16 tag, u16 len,
  4831. const void *ptr, void *data)
  4832. {
  4833. struct wmi_tlv_fw_stats_parse *parse = data;
  4834. const struct wmi_stats_event *ev = parse->ev;
  4835. struct ath11k_fw_stats *stats = parse->stats;
  4836. struct ath11k *ar;
  4837. struct ath11k_vif *arvif;
  4838. struct ieee80211_sta *sta;
  4839. struct ath11k_sta *arsta;
  4840. const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr;
  4841. int j, ret = 0;
  4842. if (tag != WMI_TAG_RSSI_STATS)
  4843. return -EPROTO;
  4844. rcu_read_lock();
  4845. ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
  4846. stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
  4847. ath11k_dbg(ab, ATH11K_DBG_WMI,
  4848. "wmi stats vdev id %d mac %pM\n",
  4849. stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr);
  4850. arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id);
  4851. if (!arvif) {
  4852. ath11k_warn(ab, "not found vif for vdev id %d\n",
  4853. stats_rssi->vdev_id);
  4854. ret = -EPROTO;
  4855. goto exit;
  4856. }
  4857. ath11k_dbg(ab, ATH11K_DBG_WMI,
  4858. "wmi stats bssid %pM vif %pK\n",
  4859. arvif->bssid, arvif->vif);
  4860. sta = ieee80211_find_sta_by_ifaddr(ar->hw,
  4861. arvif->bssid,
  4862. NULL);
  4863. if (!sta) {
  4864. ath11k_dbg(ab, ATH11K_DBG_WMI,
  4865. "not found station of bssid %pM for rssi chain\n",
  4866. arvif->bssid);
  4867. goto exit;
  4868. }
  4869. arsta = (struct ath11k_sta *)sta->drv_priv;
  4870. BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
  4871. ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
  4872. for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) {
  4873. arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j];
  4874. ath11k_dbg(ab, ATH11K_DBG_WMI,
  4875. "wmi stats beacon rssi[%d] %d data rssi[%d] %d\n",
  4876. j,
  4877. stats_rssi->rssi_avg_beacon[j],
  4878. j,
  4879. stats_rssi->rssi_avg_data[j]);
  4880. }
  4881. exit:
  4882. rcu_read_unlock();
  4883. return ret;
  4884. }
  4885. static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
  4886. struct wmi_tlv_fw_stats_parse *parse,
  4887. const void *ptr,
  4888. u16 len)
  4889. {
  4890. struct ath11k_fw_stats *stats = parse->stats;
  4891. const struct wmi_stats_event *ev = parse->ev;
  4892. struct ath11k *ar;
  4893. struct ath11k_vif *arvif;
  4894. struct ieee80211_sta *sta;
  4895. struct ath11k_sta *arsta;
  4896. int i, ret = 0;
  4897. const void *data = ptr;
  4898. if (!ev) {
  4899. ath11k_warn(ab, "failed to fetch update stats ev");
  4900. return -EPROTO;
  4901. }
  4902. stats->stats_id = 0;
  4903. rcu_read_lock();
  4904. ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
  4905. for (i = 0; i < ev->num_pdev_stats; i++) {
  4906. const struct wmi_pdev_stats *src;
  4907. struct ath11k_fw_stats_pdev *dst;
  4908. src = data;
  4909. if (len < sizeof(*src)) {
  4910. ret = -EPROTO;
  4911. goto exit;
  4912. }
  4913. stats->stats_id = WMI_REQUEST_PDEV_STAT;
  4914. data += sizeof(*src);
  4915. len -= sizeof(*src);
  4916. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  4917. if (!dst)
  4918. continue;
  4919. ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
  4920. ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  4921. ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  4922. list_add_tail(&dst->list, &stats->pdevs);
  4923. }
  4924. for (i = 0; i < ev->num_vdev_stats; i++) {
  4925. const struct wmi_vdev_stats *src;
  4926. struct ath11k_fw_stats_vdev *dst;
  4927. src = data;
  4928. if (len < sizeof(*src)) {
  4929. ret = -EPROTO;
  4930. goto exit;
  4931. }
  4932. stats->stats_id = WMI_REQUEST_VDEV_STAT;
  4933. arvif = ath11k_mac_get_arvif(ar, src->vdev_id);
  4934. if (arvif) {
  4935. sta = ieee80211_find_sta_by_ifaddr(ar->hw,
  4936. arvif->bssid,
  4937. NULL);
  4938. if (sta) {
  4939. arsta = (struct ath11k_sta *)sta->drv_priv;
  4940. arsta->rssi_beacon = src->beacon_snr;
  4941. ath11k_dbg(ab, ATH11K_DBG_WMI,
  4942. "wmi stats vdev id %d snr %d\n",
  4943. src->vdev_id, src->beacon_snr);
  4944. } else {
  4945. ath11k_dbg(ab, ATH11K_DBG_WMI,
  4946. "not found station of bssid %pM for vdev stat\n",
  4947. arvif->bssid);
  4948. }
  4949. }
  4950. data += sizeof(*src);
  4951. len -= sizeof(*src);
  4952. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  4953. if (!dst)
  4954. continue;
  4955. ath11k_wmi_pull_vdev_stats(src, dst);
  4956. list_add_tail(&dst->list, &stats->vdevs);
  4957. }
  4958. for (i = 0; i < ev->num_bcn_stats; i++) {
  4959. const struct wmi_bcn_stats *src;
  4960. struct ath11k_fw_stats_bcn *dst;
  4961. src = data;
  4962. if (len < sizeof(*src)) {
  4963. ret = -EPROTO;
  4964. goto exit;
  4965. }
  4966. stats->stats_id = WMI_REQUEST_BCN_STAT;
  4967. data += sizeof(*src);
  4968. len -= sizeof(*src);
  4969. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  4970. if (!dst)
  4971. continue;
  4972. ath11k_wmi_pull_bcn_stats(src, dst);
  4973. list_add_tail(&dst->list, &stats->bcn);
  4974. }
  4975. exit:
  4976. rcu_read_unlock();
  4977. return ret;
  4978. }
  4979. static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab,
  4980. u16 tag, u16 len,
  4981. const void *ptr, void *data)
  4982. {
  4983. struct wmi_tlv_fw_stats_parse *parse = data;
  4984. int ret = 0;
  4985. switch (tag) {
  4986. case WMI_TAG_STATS_EVENT:
  4987. parse->ev = (struct wmi_stats_event *)ptr;
  4988. parse->stats->pdev_id = parse->ev->pdev_id;
  4989. break;
  4990. case WMI_TAG_ARRAY_BYTE:
  4991. ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
  4992. break;
  4993. case WMI_TAG_PER_CHAIN_RSSI_STATS:
  4994. parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr;
  4995. if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
  4996. parse->rssi_num = parse->rssi->num_per_chain_rssi_stats;
  4997. ath11k_dbg(ab, ATH11K_DBG_WMI,
  4998. "wmi stats id 0x%x num chain %d\n",
  4999. parse->ev->stats_id,
  5000. parse->rssi_num);
  5001. break;
  5002. case WMI_TAG_ARRAY_STRUCT:
  5003. if (parse->rssi_num && !parse->chain_rssi_done) {
  5004. ret = ath11k_wmi_tlv_iter(ab, ptr, len,
  5005. ath11k_wmi_tlv_rssi_chain_parse,
  5006. parse);
  5007. if (ret) {
  5008. ath11k_warn(ab, "failed to parse rssi chain %d\n",
  5009. ret);
  5010. return ret;
  5011. }
  5012. parse->chain_rssi_done = true;
  5013. }
  5014. break;
  5015. default:
  5016. break;
  5017. }
  5018. return ret;
  5019. }
  5020. int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
  5021. struct ath11k_fw_stats *stats)
  5022. {
  5023. struct wmi_tlv_fw_stats_parse parse = { };
  5024. stats->stats_id = 0;
  5025. parse.stats = stats;
  5026. return ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
  5027. ath11k_wmi_tlv_fw_stats_parse,
  5028. &parse);
  5029. }
  5030. size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
  5031. {
  5032. struct ath11k_fw_stats_vdev *i;
  5033. size_t num = 0;
  5034. list_for_each_entry(i, head, list)
  5035. ++num;
  5036. return num;
  5037. }
  5038. static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head)
  5039. {
  5040. struct ath11k_fw_stats_bcn *i;
  5041. size_t num = 0;
  5042. list_for_each_entry(i, head, list)
  5043. ++num;
  5044. return num;
  5045. }
/* Append the base pdev stats section to the debugfs buffer at *length and
 * advance *length. scnprintf() bounds every write to ATH11K_FW_STATS_BUF_SIZE.
 */
static void
ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				   char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath11k PDEV stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Channel noise floor", pdev->ch_noise_floor);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Channel TX power", pdev->chan_tx_power);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX frame count", pdev->tx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX frame count", pdev->rx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX clear count", pdev->rx_clear_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Cycle count", pdev->cycle_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY error count", pdev->phy_err_count);

	*length = len;
}
/* Append the pdev TX stats section to the debugfs buffer at *length and
 * advance *length. scnprintf() bounds every write to ATH11K_FW_STATS_BUF_SIZE.
 */
static void
ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
			 "ath11k PDEV TX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies queued", pdev->comp_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies disp.", pdev->comp_delivered);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDU queued", pdev->msdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU queued", pdev->mpdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs dropped", pdev->wmm_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local enqued", pdev->local_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local freed", pdev->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW queued", pdev->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs reaped", pdev->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num underruns", pdev->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num HW Paused", pdev->hw_paused);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs cleaned", pdev->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs requeued", pdev->mpdus_requeued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PPDU OK", pdev->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Excessive retries", pdev->tx_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "HW rate", pdev->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Sched self triggers", pdev->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Dropped due to SW retries",
			 pdev->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Illegal rate phy errors",
			 pdev->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX timeout", pdev->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV resets", pdev->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Stateless TIDs alloc failures",
			 pdev->stateless_tid_alloc_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY underrun", pdev->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "MPDU is more than txop limit", pdev->txop_ovf);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num sequences posted", pdev->seq_posted);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num seq failed queueing ", pdev->seq_failed_queueing);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num sequences completed ", pdev->seq_completed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num sequences restarted ", pdev->seq_restarted);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num of MU sequences posted ", pdev->mu_seq_posted);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num of MPDUS truncated ", pdev->mpdus_truncated);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Num of MPDUS expired ", pdev->mpdus_expired);

	*length = len;
}
/* Append the pdev RX stats section to the debugfs buffer at *length and
 * advance *length. scnprintf() bounds every write to ATH11K_FW_STATS_BUF_SIZE.
 */
static void
ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
			 "ath11k PDEV RX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Mid PPDU route change",
			 pdev->mid_ppdu_route_change);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Tot. number of statuses", pdev->status_rcvd);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 0", pdev->r0_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 1", pdev->r1_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 2", pdev->r2_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 3", pdev->r3_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to HTT", pdev->htt_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to HTT", pdev->htt_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to stack", pdev->loc_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to stack", pdev->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Oversized AMSUs", pdev->oversize_amsdu);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors", pdev->phy_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors drops", pdev->phy_err_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Overflow errors", pdev->rx_ovfl_errs);

	*length = len;
}
/* Append one vdev's stats to the debugfs buffer at *length and advance
 * *length. Entries whose vdev does not belong to this radio are silently
 * skipped (see comment below).
 */
static void
ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
			      const struct ath11k_fw_stats_vdev *vdev,
			      char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
	u8 *vif_macaddr;
	int i;

	/* VDEV stats has all the active VDEVs of other PDEVs as well,
	 * ignoring those not part of requested PDEV
	 */
	if (!arvif)
		return;

	vif_macaddr = arvif->vif->addr;

	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "VDEV ID", vdev->vdev_id);
	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
			 "VDEV MAC address", vif_macaddr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "beacon snr", vdev->beacon_snr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "data snr", vdev->data_snr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx frames", vdev->num_rx_frames);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rts fail", vdev->num_rts_fail);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rts success", vdev->num_rts_success);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx err", vdev->num_rx_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx discard", vdev->num_rx_discard);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num tx not acked", vdev->num_tx_not_acked);

	/* Per-index dumps of the tx counters and history arrays. */
	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames", i,
				 vdev->num_tx_frames[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames retries", i,
				 vdev->num_tx_frames_retries[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames failures", i,
				 vdev->num_tx_frames_failures[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] 0x%08x\n",
				 "tx rate history", i,
				 vdev->tx_rate_history[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "beacon rssi history", i,
				 vdev->beacon_rssi_history[i]);

	len += scnprintf(buf + len, buf_len - len, "\n");
	*length = len;
}
/* Append one vdev's beacon tx stats to the debugfs buffer at *length and
 * advance *length. Unknown vdev ids are warned about and skipped.
 */
static void
ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
			     const struct ath11k_fw_stats_bcn *bcn,
			     char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
	u8 *vdev_macaddr;

	if (!arvif) {
		ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
			    bcn->vdev_id);
		return;
	}

	vdev_macaddr = arvif->vif->addr;

	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "VDEV ID", bcn->vdev_id);
	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
			 "VDEV MAC address", vdev_macaddr);
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "================");
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);

	len += scnprintf(buf + len, buf_len - len, "\n");
	*length = len;
}
/* Render the requested class of firmware stats (@stats_id) from @fw_stats
 * into the pre-sized @buf (ATH11K_FW_STATS_BUF_SIZE). The stats lists are
 * walked under ar->data_lock; the buffer is always NUL-terminated on return.
 */
void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
			      struct ath11k_fw_stats *fw_stats,
			      u32 stats_id, char *buf)
{
	u32 len = 0;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	const struct ath11k_fw_stats_pdev *pdev;
	const struct ath11k_fw_stats_vdev *vdev;
	const struct ath11k_fw_stats_bcn *bcn;
	size_t num_bcn;

	spin_lock_bh(&ar->data_lock);

	if (stats_id == WMI_REQUEST_PDEV_STAT) {
		/* Only the first pdev entry is dumped for a pdev request. */
		pdev = list_first_entry_or_null(&fw_stats->pdevs,
						struct ath11k_fw_stats_pdev, list);
		if (!pdev) {
			ath11k_warn(ar->ab, "failed to get pdev stats\n");
			goto unlock;
		}

		ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
		ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
		ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
	}

	if (stats_id == WMI_REQUEST_VDEV_STAT) {
		len += scnprintf(buf + len, buf_len - len, "\n");
		len += scnprintf(buf + len, buf_len - len, "%30s\n",
				 "ath11k VDEV stats");
		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
				 "=================");

		list_for_each_entry(vdev, &fw_stats->vdevs, list)
			ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
	}

	if (stats_id == WMI_REQUEST_BCN_STAT) {
		num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn);

		len += scnprintf(buf + len, buf_len - len, "\n");
		len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
				 "ath11k Beacon stats", num_bcn);
		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
				 "===================");

		list_for_each_entry(bcn, &fw_stats->bcn, list)
			ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
	}

unlock:
	spin_unlock_bh(&ar->data_lock);

	/* Guarantee NUL termination whether or not the output was truncated. */
	if (len >= buf_len)
		buf[len - 1] = 0;
	else
		buf[len] = 0;
}
/* HTC endpoint-credits callback: wake senders blocked on WMI tx credits. */
static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
{
	/* try to send pending beacons first. they take priority */
	wake_up(&ab->wmi_ab.tx_credits_wq);
}
/* Handle a WMI 11d new-country event: store the new alpha2 under base_lock,
 * mark every radio's 11d state machine idle, and kick the deferred 11d
 * update work.
 *
 * Returns 0 on success or a negative errno on a malformed event.
 */
static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	const struct wmi_11d_new_cc_ev *ev;
	struct ath11k *ar;
	struct ath11k_pdev *pdev;
	const void **tb;
	int ret, i;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
	if (!ev) {
		kfree(tb);
		ath11k_warn(ab, "failed to fetch 11d new cc ev");
		return -EPROTO;
	}

	/* new_alpha2 is consumed later by the update_11d work item. */
	spin_lock_bh(&ab->base_lock);
	memcpy(&ab->new_alpha2, &ev->new_alpha2, 2);
	spin_unlock_bh(&ab->base_lock);

	ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi 11d new cc %c%c\n",
		   ab->new_alpha2[0],
		   ab->new_alpha2[1]);

	kfree(tb);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;
		ar->state_11d = ATH11K_11D_IDLE;
		complete(&ar->completed_11d_scan);
	}

	queue_work(ab->workqueue, &ab->update_11d_work);

	return 0;
}
  5381. static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
  5382. struct sk_buff *skb)
  5383. {
  5384. struct ath11k_pdev_wmi *wmi = NULL;
  5385. u32 i;
  5386. u8 wmi_ep_count;
  5387. u8 eid;
  5388. eid = ATH11K_SKB_CB(skb)->eid;
  5389. dev_kfree_skb(skb);
  5390. if (eid >= ATH11K_HTC_EP_COUNT)
  5391. return;
  5392. wmi_ep_count = ab->htc.wmi_ep_count;
  5393. if (wmi_ep_count > ab->hw_params.max_radios)
  5394. return;
  5395. for (i = 0; i < ab->htc.wmi_ep_count; i++) {
  5396. if (ab->wmi_ab.wmi[i].eid == eid) {
  5397. wmi = &ab->wmi_ab.wmi[i];
  5398. break;
  5399. }
  5400. }
  5401. if (wmi)
  5402. wake_up(&wmi->tx_ce_desc_wq);
  5403. }
  5404. static bool ath11k_reg_is_world_alpha(char *alpha)
  5405. {
  5406. if (alpha[0] == '0' && alpha[1] == '0')
  5407. return true;
  5408. if (alpha[0] == 'n' && alpha[1] == 'a')
  5409. return true;
  5410. return false;
  5411. }
/* Handle a WMI regulatory channel-list event: extract the firmware's current
 * regulatory info, build an ieee80211_regdomain from it (optionally
 * intersected with the default regd) and either store it as the pdev's
 * default regd (first event after WMI init) or queue a regd update.
 *
 * Returns 0 on success or the extraction error; failures after a firmware
 * success status only WARN (see fallback comment below).
 */
static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct cur_regulatory_info *reg_info = NULL;
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int ret = 0, pdev_idx;
	struct ath11k *ar;

	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
	if (!reg_info) {
		ret = -ENOMEM;
		goto fallback;
	}

	ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
	if (ret) {
		ath11k_warn(ab, "failed to extract regulatory info from received event\n");
		goto fallback;
	}

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested ctry,
		 * fw retains the current regd. We print a failure info
		 * and return from here.
		 */
		ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
		goto mem_free;
	}

	pdev_idx = reg_info->phy_id;

	/* Avoid default reg rule updates sent during FW recovery if
	 * it is already available
	 */
	spin_lock(&ab->base_lock);
	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
	    ab->default_regd[pdev_idx]) {
		spin_unlock(&ab->base_lock);
		goto mem_free;
	}
	spin_unlock(&ab->base_lock);

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params.single_pdev_only &&
		    pdev_idx < ab->hw_params.num_rxmda_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
		    (char *)reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e a default regd was already set during initialization
	 * and the regd coming from this event has a valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath11k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath11k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath11k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (ab->default_regd[pdev_idx]) {
		/* The initial rules from FW after WMI Init is to build
		 * the default regd. From then on, any rules updated for
		 * the pdev could be due to user reg changes.
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		queue_work(ab->workqueue, &ar->regd_update_work);
	} else {
		/* This regd would be applied during mac registration and is
		 * held constant throughout for regd intersection purpose
		 */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);

	goto mem_free;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again if fw has succeeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
	WARN_ON(1);
mem_free:
	if (reg_info) {
		kfree(reg_info->reg_rules_2g_ptr);
		kfree(reg_info->reg_rules_5g_ptr);
		kfree(reg_info);
	}
	return ret;
}
  5520. static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
  5521. const void *ptr, void *data)
  5522. {
  5523. struct wmi_tlv_rdy_parse *rdy_parse = data;
  5524. struct wmi_ready_event fixed_param;
  5525. struct wmi_mac_addr *addr_list;
  5526. struct ath11k_pdev *pdev;
  5527. u32 num_mac_addr;
  5528. int i;
  5529. switch (tag) {
  5530. case WMI_TAG_READY_EVENT:
  5531. memset(&fixed_param, 0, sizeof(fixed_param));
  5532. memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
  5533. min_t(u16, sizeof(fixed_param), len));
  5534. ab->wlan_init_status = fixed_param.ready_event_min.status;
  5535. rdy_parse->num_extra_mac_addr =
  5536. fixed_param.ready_event_min.num_extra_mac_addr;
  5537. ether_addr_copy(ab->mac_addr,
  5538. fixed_param.ready_event_min.mac_addr.addr);
  5539. ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
  5540. ab->wmi_ready = true;
  5541. break;
  5542. case WMI_TAG_ARRAY_FIXED_STRUCT:
  5543. addr_list = (struct wmi_mac_addr *)ptr;
  5544. num_mac_addr = rdy_parse->num_extra_mac_addr;
  5545. if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
  5546. break;
  5547. for (i = 0; i < ab->num_radios; i++) {
  5548. pdev = &ab->pdevs[i];
  5549. ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
  5550. }
  5551. ab->pdevs_macaddr_valid = true;
  5552. break;
  5553. default:
  5554. break;
  5555. }
  5556. return 0;
  5557. }
  5558. static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
  5559. {
  5560. struct wmi_tlv_rdy_parse rdy_parse = { };
  5561. int ret;
  5562. ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
  5563. ath11k_wmi_tlv_rdy_parse, &rdy_parse);
  5564. if (ret) {
  5565. ath11k_warn(ab, "failed to parse tlv %d\n", ret);
  5566. return ret;
  5567. }
  5568. complete(&ab->wmi_ab.unified_ready);
  5569. return 0;
  5570. }
  5571. static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
  5572. {
  5573. struct wmi_peer_delete_resp_event peer_del_resp;
  5574. struct ath11k *ar;
  5575. if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
  5576. ath11k_warn(ab, "failed to extract peer delete resp");
  5577. return;
  5578. }
  5579. rcu_read_lock();
  5580. ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id);
  5581. if (!ar) {
  5582. ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d",
  5583. peer_del_resp.vdev_id);
  5584. rcu_read_unlock();
  5585. return;
  5586. }
  5587. complete(&ar->peer_delete_done);
  5588. rcu_read_unlock();
  5589. ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
  5590. peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
  5591. }
  5592. static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab,
  5593. struct sk_buff *skb)
  5594. {
  5595. struct ath11k *ar;
  5596. u32 vdev_id = 0;
  5597. if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
  5598. ath11k_warn(ab, "failed to extract vdev delete resp");
  5599. return;
  5600. }
  5601. rcu_read_lock();
  5602. ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
  5603. if (!ar) {
  5604. ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
  5605. vdev_id);
  5606. rcu_read_unlock();
  5607. return;
  5608. }
  5609. complete(&ar->vdev_delete_done);
  5610. rcu_read_unlock();
  5611. ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev delete resp for vdev id %d\n",
  5612. vdev_id);
  5613. }
  5614. static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
  5615. {
  5616. switch (vdev_resp_status) {
  5617. case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
  5618. return "invalid vdev id";
  5619. case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
  5620. return "not supported";
  5621. case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
  5622. return "dfs violation";
  5623. case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
  5624. return "invalid regdomain";
  5625. default:
  5626. return "unknown";
  5627. }
  5628. }
  5629. static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
  5630. {
  5631. struct wmi_vdev_start_resp_event vdev_start_resp;
  5632. struct ath11k *ar;
  5633. u32 status;
  5634. if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
  5635. ath11k_warn(ab, "failed to extract vdev start resp");
  5636. return;
  5637. }
  5638. rcu_read_lock();
  5639. ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
  5640. if (!ar) {
  5641. ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
  5642. vdev_start_resp.vdev_id);
  5643. rcu_read_unlock();
  5644. return;
  5645. }
  5646. ar->last_wmi_vdev_start_status = 0;
  5647. status = vdev_start_resp.status;
  5648. if (WARN_ON_ONCE(status)) {
  5649. ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
  5650. status, ath11k_wmi_vdev_resp_print(status));
  5651. ar->last_wmi_vdev_start_status = status;
  5652. }
  5653. complete(&ar->vdev_setup_done);
  5654. rcu_read_unlock();
  5655. ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
  5656. vdev_start_resp.vdev_id);
  5657. }
  5658. static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
  5659. {
  5660. struct ath11k_vif *arvif;
  5661. u32 vdev_id, tx_status;
  5662. if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
  5663. &vdev_id, &tx_status) != 0) {
  5664. ath11k_warn(ab, "failed to extract bcn tx status");
  5665. return;
  5666. }
  5667. rcu_read_lock();
  5668. arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id);
  5669. if (!arvif) {
  5670. ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status",
  5671. vdev_id);
  5672. rcu_read_unlock();
  5673. return;
  5674. }
  5675. ath11k_mac_bcn_tx_event(arvif);
  5676. rcu_read_unlock();
  5677. }
/* Handle WMI_PEER_STA_PS_STATECHG_EVENTID: track a station's power save
 * state transition and, when the firmware advertises PS duration support,
 * accumulate the total time the peer has spent in power save.
 *
 * Locking: peer/sta lookup happens under ab->base_lock; per-station PS
 * bookkeeping happens under ar->data_lock, all inside an RCU read section.
 */
static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
						   struct sk_buff *skb)
{
	const struct wmi_peer_sta_ps_state_chg_event *ev;
	struct ieee80211_sta *sta;
	struct ath11k_peer *peer;
	struct ath11k *ar;
	struct ath11k_sta *arsta;
	const void **tb;
	enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch sta ps change ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "peer sta ps chnange ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
		   ev->peer_macaddr.addr, ev->peer_ps_state,
		   ev->ps_supported_bitmap, ev->peer_ps_valid,
		   ev->peer_ps_timestamp);

	rcu_read_lock();

	spin_lock_bh(&ab->base_lock);

	/* peer list is protected by base_lock */
	peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr);

	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr);
		goto exit;
	}

	ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);

	if (!ar) {
		spin_unlock_bh(&ab->base_lock);
		ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d",
			    peer->vdev_id);
		goto exit;
	}

	/* Cache the sta pointer before dropping base_lock; peer must not be
	 * dereferenced after the unlock below.
	 */
	sta = peer->sta;

	spin_unlock_bh(&ab->base_lock);

	if (!sta) {
		ath11k_warn(ab, "failed to find station entry %pM\n",
			    ev->peer_macaddr.addr);
		goto exit;
	}

	arsta = (struct ath11k_sta *)sta->drv_priv;

	spin_lock_bh(&ar->data_lock);

	/* Remember the previous state to detect an ON -> OFF transition */
	peer_previous_ps_state = arsta->peer_ps_state;
	arsta->peer_ps_state = ev->peer_ps_state;
	arsta->peer_current_ps_valid = !!ev->peer_ps_valid;

	if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
		     ar->ab->wmi_ab.svc_map)) {
		/* Duration accounting needs a valid PS flag and a firmware
		 * timestamp; skip the bookkeeping otherwise.
		 */
		if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) ||
		    !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) ||
		    !ev->peer_ps_valid)
			goto out;

		if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) {
			/* Entering PS: remember when it started */
			arsta->ps_start_time = ev->peer_ps_timestamp;
			arsta->ps_start_jiffies = jiffies;
		} else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF &&
			   peer_previous_ps_state == WMI_PEER_PS_STATE_ON) {
			/* Leaving PS: add this interval to the running total */
			arsta->ps_total_duration = arsta->ps_total_duration +
					(ev->peer_ps_timestamp - arsta->ps_start_time);
		}

		if (ar->ps_timekeeper_enable)
			trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr,
						   ev->peer_ps_timestamp,
						   arsta->peer_ps_state);
	}

out:
	spin_unlock_bh(&ar->data_lock);

exit:
	rcu_read_unlock();
	kfree(tb);
}
  5758. static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
  5759. {
  5760. struct ath11k *ar;
  5761. u32 vdev_id = 0;
  5762. if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
  5763. ath11k_warn(ab, "failed to extract vdev stopped event");
  5764. return;
  5765. }
  5766. rcu_read_lock();
  5767. ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
  5768. if (!ar) {
  5769. ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
  5770. vdev_id);
  5771. rcu_read_unlock();
  5772. return;
  5773. }
  5774. complete(&ar->vdev_setup_done);
  5775. rcu_read_unlock();
  5776. ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
  5777. }
/* Handle WMI_MGMT_RX_EVENTID: fill in the mac80211 rx status (band, freq,
 * signal, rate, decryption flags) and hand the frame to mac80211.
 *
 * skb ownership: consumed on every path — freed on error/drop, otherwise
 * passed to ieee80211_rx_ni().
 */
static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct mgmt_rx_event_params rx_ev = {0};
	struct ath11k *ar;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u16 fc;
	struct ieee80211_supported_band *sband;

	if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
		ath11k_warn(ab, "failed to extract mgmt rx event");
		dev_kfree_skb(skb);
		return;
	}

	memset(status, 0, sizeof(*status));

	ath11k_dbg(ab, ATH11K_DBG_MGMT, "mgmt rx event status %08x\n",
		   rx_ev.status);

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);

	if (!ar) {
		ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
			    rx_ev.pdev_id);
		dev_kfree_skb(skb);
		goto exit;
	}

	/* Drop frames during CAC and frames firmware flagged as corrupt */
	if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Derive the band: 6 GHz is identified by frequency, 2.4/5 GHz by
	 * channel number.
	 */
	if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ &&
	    rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) {
		status->band = NL80211_BAND_6GHZ;
		status->freq = rx_ev.chan_freq;
	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
		status->band = NL80211_BAND_2GHZ;
	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
		status->band = NL80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.phy_mode == MODE_11B &&
	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);

	sband = &ar->mac.sbands[status->band];

	/* For 6 GHz the frequency was already taken from the event above */
	if (status->band != NL80211_BAND_6GHZ)
		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
							      status->band);

	status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
	/* rx_ev.rate is in units of 100 kbps */
	status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* Firmware is guaranteed to report all essential management frames via
	 * WMI while it can deliver some extra via HTT. Since there can be
	 * duplicates split the reporting wrt monitor/sniffing.
	 */
	status->flag |= RX_FLAG_SKIP_MONITOR;

	/* In case of PMF, FW delivers decrypted frames with Protected Bit set.
	 * Don't clear that. Also, FW delivers broadcast management frames
	 * (ex: group privacy action frames in mesh) as encrypted payload.
	 */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			/* Clear the protected bit so mac80211 does not try to
			 * decrypt an already-plaintext frame.
			 */
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath11k_mac_handle_beacon(ar, skb);

	ath11k_dbg(ab, ATH11K_DBG_MGMT,
		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath11k_dbg(ab, ATH11K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	ieee80211_rx_ni(ar->hw, skb);

exit:
	rcu_read_unlock();
}
  5871. static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
  5872. {
  5873. struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
  5874. struct ath11k *ar;
  5875. if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
  5876. ath11k_warn(ab, "failed to extract mgmt tx compl event");
  5877. return;
  5878. }
  5879. rcu_read_lock();
  5880. ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
  5881. if (!ar) {
  5882. ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
  5883. tx_compl_param.pdev_id);
  5884. goto exit;
  5885. }
  5886. wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id,
  5887. tx_compl_param.status);
  5888. ath11k_dbg(ab, ATH11K_DBG_MGMT,
  5889. "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
  5890. tx_compl_param.pdev_id, tx_compl_param.desc_id,
  5891. tx_compl_param.status);
  5892. exit:
  5893. rcu_read_unlock();
  5894. }
  5895. static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
  5896. u32 vdev_id,
  5897. enum ath11k_scan_state state)
  5898. {
  5899. int i;
  5900. struct ath11k_pdev *pdev;
  5901. struct ath11k *ar;
  5902. for (i = 0; i < ab->num_radios; i++) {
  5903. pdev = rcu_dereference(ab->pdevs_active[i]);
  5904. if (pdev && pdev->ar) {
  5905. ar = pdev->ar;
  5906. spin_lock_bh(&ar->data_lock);
  5907. if (ar->scan.state == state &&
  5908. ar->scan.vdev_id == vdev_id) {
  5909. spin_unlock_bh(&ar->data_lock);
  5910. return ar;
  5911. }
  5912. spin_unlock_bh(&ar->data_lock);
  5913. }
  5914. }
  5915. return NULL;
  5916. }
/* Handle WMI_SCAN_EVENTID: locate the radio that owns the scan and drive
 * its scan state machine according to the event type.
 */
static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct ath11k *ar;
	struct wmi_scan_event scan_ev = {0};

	if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
		ath11k_warn(ab, "failed to extract scan event");
		return;
	}

	rcu_read_lock();

	/* In case the scan was cancelled, ex. during interface teardown,
	 * the interface will not be found in active interfaces.
	 * Rather, in such scenarios, iterate over the active pdev's to
	 * search 'ar' if the corresponding 'ar' scan is ABORTING and the
	 * aborting scan's vdev id matches this event info.
	 */
	if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
	    scan_ev.reason == WMI_SCAN_REASON_CANCELLED) {
		ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
						 ATH11K_SCAN_ABORTING);
		if (!ar)
			/* Fall back to RUNNING in case the abort raced with
			 * the state transition.
			 */
			ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
							 ATH11K_SCAN_RUNNING);
	} else {
		ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
	}

	if (!ar) {
		ath11k_warn(ab, "Received scan event for unknown vdev");
		rcu_read_unlock();
		return;
	}

	/* data_lock protects ar->scan state while the event is dispatched */
	spin_lock_bh(&ar->data_lock);

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
		   scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
		   scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
		   ath11k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (scan_ev.event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath11k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath11k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath11k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath11k_warn(ab, "received scan start failure event\n");
		ath11k_wmi_event_scan_start_failed(ar);
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		__ath11k_mac_scan_finish(ar);
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
	case WMI_SCAN_EVENT_RESTARTED:
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
	default:
		/* No state machine action needed for these */
		break;
	}

	spin_unlock_bh(&ar->data_lock);

	rcu_read_unlock();
}
  5983. static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
  5984. {
  5985. struct wmi_peer_sta_kickout_arg arg = {};
  5986. struct ieee80211_sta *sta;
  5987. struct ath11k_peer *peer;
  5988. struct ath11k *ar;
  5989. u32 vdev_id;
  5990. if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
  5991. ath11k_warn(ab, "failed to extract peer sta kickout event");
  5992. return;
  5993. }
  5994. rcu_read_lock();
  5995. spin_lock_bh(&ab->base_lock);
  5996. peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
  5997. if (!peer) {
  5998. ath11k_warn(ab, "peer not found %pM\n",
  5999. arg.mac_addr);
  6000. spin_unlock_bh(&ab->base_lock);
  6001. goto exit;
  6002. }
  6003. vdev_id = peer->vdev_id;
  6004. spin_unlock_bh(&ab->base_lock);
  6005. ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
  6006. if (!ar) {
  6007. ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
  6008. peer->vdev_id);
  6009. goto exit;
  6010. }
  6011. sta = ieee80211_find_sta_by_ifaddr(ar->hw,
  6012. arg.mac_addr, NULL);
  6013. if (!sta) {
  6014. ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
  6015. arg.mac_addr);
  6016. goto exit;
  6017. }
  6018. ath11k_dbg(ab, ATH11K_DBG_WMI, "peer sta kickout event %pM",
  6019. arg.mac_addr);
  6020. ieee80211_report_low_ack(sta, 10);
  6021. exit:
  6022. rcu_read_unlock();
  6023. }
  6024. static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
  6025. {
  6026. struct wmi_roam_event roam_ev = {};
  6027. struct ath11k *ar;
  6028. if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
  6029. ath11k_warn(ab, "failed to extract roam event");
  6030. return;
  6031. }
  6032. ath11k_dbg(ab, ATH11K_DBG_WMI,
  6033. "wmi roam event vdev %u reason 0x%08x rssi %d\n",
  6034. roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
  6035. rcu_read_lock();
  6036. ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
  6037. if (!ar) {
  6038. ath11k_warn(ab, "invalid vdev id in roam ev %d",
  6039. roam_ev.vdev_id);
  6040. rcu_read_unlock();
  6041. return;
  6042. }
  6043. if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
  6044. ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
  6045. roam_ev.reason, roam_ev.vdev_id);
  6046. switch (roam_ev.reason) {
  6047. case WMI_ROAM_REASON_BEACON_MISS:
  6048. ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id);
  6049. break;
  6050. case WMI_ROAM_REASON_BETTER_AP:
  6051. case WMI_ROAM_REASON_LOW_RSSI:
  6052. case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
  6053. case WMI_ROAM_REASON_HO_FAILED:
  6054. ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
  6055. roam_ev.reason, roam_ev.vdev_id);
  6056. break;
  6057. }
  6058. rcu_read_unlock();
  6059. }
/* Handle WMI_CHAN_INFO_EVENTID: on a channel-start response during a scan,
 * reset and seed the survey entry for that frequency with noise floor and
 * cycle counters.
 */
static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_chan_info_event ch_info_ev = {0};
	struct ath11k *ar;
	struct survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	u32 cc_freq_hz = ab->cc_freq_hz;

	if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
		ath11k_warn(ab, "failed to extract chan info event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
		   ch_info_ev.mac_clk_mhz);

	/* End-of-report marker carries no survey data */
	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
		ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
		return;
	}

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in chan info ev %d",
			    ch_info_ev.vdev_id);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&ar->data_lock);

	/* Only meaningful while a scan owns the channel counters */
	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
		goto exit;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		break;
	}

	idx = freq_to_idx(ar, ch_info_ev.freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
			    ch_info_ev.freq, idx);
		goto exit;
	}

	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
	 * HW channel counters frequency value
	 */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);

	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
		/* Fresh channel visit: reset the survey entry and seed it with
		 * the counters reported at channel start.
		 */
		survey = &ar->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = ch_info_ev.noise_floor;
		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
				 SURVEY_INFO_TIME_BUSY;
		survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
		survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
	}
exit:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
/* Handle WMI_PDEV_BSS_CHAN_INFO_EVENTID: reassemble the 64-bit channel
 * cycle counters from their high/low halves and publish them to the
 * survey entry for the reported frequency, then signal the waiter.
 */
static void
ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
	struct survey_info *survey;
	struct ath11k *ar;
	/* HW channel counters frequency value in hertz */
	u32 cc_freq_hz = ab->cc_freq_hz;
	u64 busy, total, tx, rx, rx_bss;
	int idx;

	if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
		ath11k_warn(ab, "failed to extract pdev bss chan info event");
		return;
	}

	/* Counters arrive split into 32-bit high/low words */
	busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
			bss_ch_info_ev.rx_clear_count_low;

	total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
			bss_ch_info_ev.cycle_count_low;

	tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
			bss_ch_info_ev.tx_cycle_count_low;

	rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
			bss_ch_info_ev.rx_cycle_count_low;

	rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
			bss_ch_info_ev.rx_bss_cycle_count_low;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
		   bss_ch_info_ev.noise_floor, busy, total,
		   tx, rx, rx_bss);

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);

	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
			    bss_ch_info_ev.pdev_id);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&ar->data_lock);
	idx = freq_to_idx(ar, bss_ch_info_ev.freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
			    bss_ch_info_ev.freq, idx);
		goto exit;
	}

	survey = &ar->survey[idx];

	/* Convert cycle counts to time using the HW counter clock rate */
	survey->noise     = bss_ch_info_ev.noise_floor;
	survey->time      = div_u64(total, cc_freq_hz);
	survey->time_busy = div_u64(busy, cc_freq_hz);
	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
	survey->time_tx   = div_u64(tx, cc_freq_hz);
	survey->filled |= (SURVEY_INFO_NOISE_DBM |
			   SURVEY_INFO_TIME |
			   SURVEY_INFO_TIME_BUSY |
			   SURVEY_INFO_TIME_RX |
			   SURVEY_INFO_TIME_TX);
exit:
	spin_unlock_bh(&ar->data_lock);
	/* Signal the requester even when the frequency was out of range */
	complete(&ar->bss_survey_done);
	rcu_read_unlock();
}
  6183. static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
  6184. struct sk_buff *skb)
  6185. {
  6186. struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
  6187. struct ath11k *ar;
  6188. if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
  6189. ath11k_warn(ab, "failed to extract install key compl event");
  6190. return;
  6191. }
  6192. ath11k_dbg(ab, ATH11K_DBG_WMI,
  6193. "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
  6194. install_key_compl.key_idx, install_key_compl.key_flags,
  6195. install_key_compl.macaddr, install_key_compl.status);
  6196. rcu_read_lock();
  6197. ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
  6198. if (!ar) {
  6199. ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
  6200. install_key_compl.vdev_id);
  6201. rcu_read_unlock();
  6202. return;
  6203. }
  6204. ar->install_key_status = 0;
  6205. if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
  6206. ath11k_warn(ab, "install key failed for %pM status %d\n",
  6207. install_key_compl.macaddr, install_key_compl.status);
  6208. ar->install_key_status = install_key_compl.status;
  6209. }
  6210. complete(&ar->install_key_done);
  6211. rcu_read_unlock();
  6212. }
/* TLV walker for WMI_SERVICE_AVAILABLE_EVENTID: decode the ext and ext2
 * service bitmap segments into ab->wmi_ab.svc_map.
 *
 * Each 32-bit segment word carries WMI_AVAIL_SERVICE_BITS_IN_SIZE32 service
 * flags; j tracks the absolute service id while i indexes the segment word.
 */
static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	const struct wmi_service_available_event *ev;
	u32 *wmi_ext2_service_bitmap;
	int i, j;

	switch (tag) {
	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
		ev = (struct wmi_service_available_event *)ptr;
		/* ext services occupy ids [WMI_MAX_SERVICE, WMI_MAX_EXT_SERVICE) */
		for (i = 0, j = WMI_MAX_SERVICE;
		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
		     i++) {
			/* The inner do/while walks one 32-bit word; it exits
			 * when ++j wraps to the next word boundary.
			 */
			do {
				if (ev->wmi_service_segment_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					set_bit(j, ab->wmi_ab.svc_map);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
			   ev->wmi_service_segment_bitmap[0],
			   ev->wmi_service_segment_bitmap[1],
			   ev->wmi_service_segment_bitmap[2],
			   ev->wmi_service_segment_bitmap[3]);
		break;
	case WMI_TAG_ARRAY_UINT32:
		wmi_ext2_service_bitmap = (u32 *)ptr;
		/* ext2 services occupy ids [WMI_MAX_EXT_SERVICE, WMI_MAX_EXT2_SERVICE) */
		for (i = 0, j = WMI_MAX_EXT_SERVICE;
		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
		     i++) {
			do {
				if (wmi_ext2_service_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					set_bit(j, ab->wmi_ab.svc_map);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
		break;
	}
	return 0;
}
  6258. static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
  6259. {
  6260. int ret;
  6261. ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
  6262. ath11k_wmi_tlv_services_parser,
  6263. NULL);
  6264. if (ret)
  6265. ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
  6266. }
  6267. static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
  6268. {
  6269. struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
  6270. struct ath11k *ar;
  6271. if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
  6272. ath11k_warn(ab, "failed to extract peer assoc conf event");
  6273. return;
  6274. }
  6275. ath11k_dbg(ab, ATH11K_DBG_WMI,
  6276. "peer assoc conf ev vdev id %d macaddr %pM\n",
  6277. peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
  6278. rcu_read_lock();
  6279. ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
  6280. if (!ar) {
  6281. ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
  6282. peer_assoc_conf.vdev_id);
  6283. rcu_read_unlock();
  6284. return;
  6285. }
  6286. complete(&ar->peer_assoc_done);
  6287. rcu_read_unlock();
  6288. }
/* Handle WMI_UPDATE_STATS_EVENTID: pull firmware stats from the event and
 * route them either to the pdev stats list (get_txpower/debugfs consumers)
 * or to the debugfs fw-stats processor, then signal completion.
 */
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct ath11k_fw_stats stats = {};
	struct ath11k *ar;
	int ret;

	INIT_LIST_HEAD(&stats.pdevs);
	INIT_LIST_HEAD(&stats.vdevs);
	INIT_LIST_HEAD(&stats.bcn);

	ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
	if (ret) {
		ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
		goto free;
	}

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
	if (!ar) {
		rcu_read_unlock();
		ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
			    stats.pdev_id, ret);
		goto free;
	}

	spin_lock_bh(&ar->data_lock);

	/* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
	 * debugfs fw stats. Therefore, processing it separately.
	 */
	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
		ar->fw_stats_done = true;
		goto complete;
	}

	/* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
	 * are currently requested only via debugfs fw stats. Hence, processing these
	 * in debugfs context
	 */
	ath11k_debugfs_fw_stats_process(ar, &stats);

complete:
	complete(&ar->fw_stats_complete);
	/* NOTE(review): rcu_read_unlock() is issued before releasing
	 * data_lock, i.e. not in reverse acquisition order. This is legal
	 * (RCU read sections may be exited while a spinlock is held) but
	 * unusual — confirm against upstream before reordering.
	 */
	rcu_read_unlock();
	spin_unlock_bh(&ar->data_lock);

	/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
	 * at this point, no need to free the individual list.
	 */
	return;

free:
	ath11k_fw_stats_free(&stats);
}
  6335. /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
  6336. * is not part of BDF CTL(Conformance test limits) table entries.
  6337. */
  6338. static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
  6339. struct sk_buff *skb)
  6340. {
  6341. const void **tb;
  6342. const struct wmi_pdev_ctl_failsafe_chk_event *ev;
  6343. int ret;
  6344. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  6345. if (IS_ERR(tb)) {
  6346. ret = PTR_ERR(tb);
  6347. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  6348. return;
  6349. }
  6350. ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
  6351. if (!ev) {
  6352. ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
  6353. kfree(tb);
  6354. return;
  6355. }
  6356. ath11k_dbg(ab, ATH11K_DBG_WMI,
  6357. "pdev ctl failsafe check ev status %d\n",
  6358. ev->ctl_failsafe_status);
  6359. /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
  6360. * to 10 dBm else the CTL power entry in the BDF would be picked up.
  6361. */
  6362. if (ev->ctl_failsafe_status != 0)
  6363. ath11k_warn(ab, "pdev ctl failsafe failure status %d",
  6364. ev->ctl_failsafe_status);
  6365. kfree(tb);
  6366. }
  6367. static void
  6368. ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
  6369. const struct wmi_pdev_csa_switch_ev *ev,
  6370. const u32 *vdev_ids)
  6371. {
  6372. int i;
  6373. struct ath11k_vif *arvif;
  6374. /* Finish CSA once the switch count becomes NULL */
  6375. if (ev->current_switch_count)
  6376. return;
  6377. rcu_read_lock();
  6378. for (i = 0; i < ev->num_vdevs; i++) {
  6379. arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
  6380. if (!arvif) {
  6381. ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
  6382. vdev_ids[i]);
  6383. continue;
  6384. }
  6385. if (arvif->is_up && arvif->vif->bss_conf.csa_active)
  6386. ieee80211_csa_finish(arvif->vif);
  6387. }
  6388. rcu_read_unlock();
  6389. }
  6390. static void
  6391. ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
  6392. struct sk_buff *skb)
  6393. {
  6394. const void **tb;
  6395. const struct wmi_pdev_csa_switch_ev *ev;
  6396. const u32 *vdev_ids;
  6397. int ret;
  6398. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  6399. if (IS_ERR(tb)) {
  6400. ret = PTR_ERR(tb);
  6401. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  6402. return;
  6403. }
  6404. ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
  6405. vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
  6406. if (!ev || !vdev_ids) {
  6407. ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
  6408. kfree(tb);
  6409. return;
  6410. }
  6411. ath11k_dbg(ab, ATH11K_DBG_WMI,
  6412. "pdev csa switch count %d for pdev %d, num_vdevs %d",
  6413. ev->current_switch_count, ev->pdev_id,
  6414. ev->num_vdevs);
  6415. ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
  6416. kfree(tb);
  6417. }
  6418. static void
  6419. ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
  6420. {
  6421. const void **tb;
  6422. const struct wmi_pdev_radar_ev *ev;
  6423. struct ath11k *ar;
  6424. int ret;
  6425. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  6426. if (IS_ERR(tb)) {
  6427. ret = PTR_ERR(tb);
  6428. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  6429. return;
  6430. }
  6431. ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
  6432. if (!ev) {
  6433. ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
  6434. kfree(tb);
  6435. return;
  6436. }
  6437. ath11k_dbg(ab, ATH11K_DBG_WMI,
  6438. "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
  6439. ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
  6440. ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
  6441. ev->freq_offset, ev->sidx);
  6442. rcu_read_lock();
  6443. ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
  6444. if (!ar) {
  6445. ath11k_warn(ab, "radar detected in invalid pdev %d\n",
  6446. ev->pdev_id);
  6447. goto exit;
  6448. }
  6449. ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
  6450. ev->pdev_id);
  6451. if (ar->dfs_block_radar_events)
  6452. ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
  6453. else
  6454. ieee80211_radar_detected(ar->hw);
  6455. exit:
  6456. rcu_read_unlock();
  6457. kfree(tb);
  6458. }
  6459. static void
  6460. ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
  6461. struct sk_buff *skb)
  6462. {
  6463. struct ath11k *ar;
  6464. const void **tb;
  6465. const struct wmi_pdev_temperature_event *ev;
  6466. int ret;
  6467. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  6468. if (IS_ERR(tb)) {
  6469. ret = PTR_ERR(tb);
  6470. ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
  6471. return;
  6472. }
  6473. ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
  6474. if (!ev) {
  6475. ath11k_warn(ab, "failed to fetch pdev temp ev");
  6476. kfree(tb);
  6477. return;
  6478. }
  6479. ath11k_dbg(ab, ATH11K_DBG_WMI,
  6480. "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
  6481. rcu_read_lock();
  6482. ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
  6483. if (!ar) {
  6484. ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
  6485. goto exit;
  6486. }
  6487. ath11k_thermal_event_temperature(ar, ev->temp);
  6488. exit:
  6489. rcu_read_unlock();
  6490. kfree(tb);
  6491. }
  6492. static void ath11k_fils_discovery_event(struct ath11k_base *ab,
  6493. struct sk_buff *skb)
  6494. {
  6495. const void **tb;
  6496. const struct wmi_fils_discovery_event *ev;
  6497. int ret;
  6498. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  6499. if (IS_ERR(tb)) {
  6500. ret = PTR_ERR(tb);
  6501. ath11k_warn(ab,
  6502. "failed to parse FILS discovery event tlv %d\n",
  6503. ret);
  6504. return;
  6505. }
  6506. ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
  6507. if (!ev) {
  6508. ath11k_warn(ab, "failed to fetch FILS discovery event\n");
  6509. kfree(tb);
  6510. return;
  6511. }
  6512. ath11k_warn(ab,
  6513. "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
  6514. ev->vdev_id, ev->fils_tt, ev->tbtt);
  6515. kfree(tb);
  6516. }
  6517. static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
  6518. struct sk_buff *skb)
  6519. {
  6520. const void **tb;
  6521. const struct wmi_probe_resp_tx_status_event *ev;
  6522. int ret;
  6523. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  6524. if (IS_ERR(tb)) {
  6525. ret = PTR_ERR(tb);
  6526. ath11k_warn(ab,
  6527. "failed to parse probe response transmission status event tlv: %d\n",
  6528. ret);
  6529. return;
  6530. }
  6531. ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
  6532. if (!ev) {
  6533. ath11k_warn(ab,
  6534. "failed to fetch probe response transmission status event");
  6535. kfree(tb);
  6536. return;
  6537. }
  6538. if (ev->tx_status)
  6539. ath11k_warn(ab,
  6540. "Probe response transmission failed for vdev_id %u, status %u\n",
  6541. ev->vdev_id, ev->tx_status);
  6542. kfree(tb);
  6543. }
  6544. static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab,
  6545. u16 tag, u16 len,
  6546. const void *ptr, void *data)
  6547. {
  6548. struct wmi_wow_ev_arg *ev = data;
  6549. const char *wow_pg_fault;
  6550. int wow_pg_len;
  6551. switch (tag) {
  6552. case WMI_TAG_WOW_EVENT_INFO:
  6553. memcpy(ev, ptr, sizeof(*ev));
  6554. ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n",
  6555. ev->wake_reason, wow_reason(ev->wake_reason));
  6556. break;
  6557. case WMI_TAG_ARRAY_BYTE:
  6558. if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) {
  6559. wow_pg_fault = ptr;
  6560. /* the first 4 bytes are length */
  6561. wow_pg_len = *(int *)wow_pg_fault;
  6562. wow_pg_fault += sizeof(int);
  6563. ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n",
  6564. wow_pg_len);
  6565. ath11k_dbg_dump(ab, ATH11K_DBG_WMI,
  6566. "wow_event_info_type packet present",
  6567. "wow_pg_fault ",
  6568. wow_pg_fault,
  6569. wow_pg_len);
  6570. }
  6571. break;
  6572. default:
  6573. break;
  6574. }
  6575. return 0;
  6576. }
  6577. static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb)
  6578. {
  6579. struct wmi_wow_ev_arg ev = { };
  6580. int ret;
  6581. ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
  6582. ath11k_wmi_tlv_wow_wakeup_host_parse,
  6583. &ev);
  6584. if (ret) {
  6585. ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret);
  6586. return;
  6587. }
  6588. complete(&ab->wow.wakeup_completed);
  6589. }
/* WMI_DIAG_EVENTID handler: hand the raw firmware diag payload to the
 * ath11k tracepoint infrastructure; no parsing is done here.
 */
static void
ath11k_wmi_diag_event(struct ath11k_base *ab,
		      struct sk_buff *skb)
{
	trace_ath11k_wmi_diag(ab, skb->data, skb->len);
}
  6596. static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
  6597. {
  6598. switch (status) {
  6599. case WMI_ADD_TWT_STATUS_OK:
  6600. return "ok";
  6601. case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
  6602. return "twt disabled";
  6603. case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
  6604. return "dialog id in use";
  6605. case WMI_ADD_TWT_STATUS_INVALID_PARAM:
  6606. return "invalid parameters";
  6607. case WMI_ADD_TWT_STATUS_NOT_READY:
  6608. return "not ready";
  6609. case WMI_ADD_TWT_STATUS_NO_RESOURCE:
  6610. return "resource unavailable";
  6611. case WMI_ADD_TWT_STATUS_NO_ACK:
  6612. return "no ack";
  6613. case WMI_ADD_TWT_STATUS_NO_RESPONSE:
  6614. return "no response";
  6615. case WMI_ADD_TWT_STATUS_DENIED:
  6616. return "denied";
  6617. case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
  6618. fallthrough;
  6619. default:
  6620. return "unknown error";
  6621. }
  6622. }
  6623. static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
  6624. struct sk_buff *skb)
  6625. {
  6626. const void **tb;
  6627. const struct wmi_twt_add_dialog_event *ev;
  6628. int ret;
  6629. tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
  6630. if (IS_ERR(tb)) {
  6631. ret = PTR_ERR(tb);
  6632. ath11k_warn(ab,
  6633. "failed to parse wmi twt add dialog status event tlv: %d\n",
  6634. ret);
  6635. return;
  6636. }
  6637. ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
  6638. if (!ev) {
  6639. ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
  6640. goto exit;
  6641. }
  6642. if (ev->status)
  6643. ath11k_warn(ab,
  6644. "wmi add twt dialog event vdev %d dialog id %d status %s\n",
  6645. ev->vdev_id, ev->dialog_id,
  6646. ath11k_wmi_twt_add_dialog_event_status(ev->status));
  6647. exit:
  6648. kfree(tb);
  6649. }
/* Handle WMI_GTK_OFFLOAD_STATUS_EVENTID: firmware performed a GTK rekey on
 * our behalf (typically while the host slept); reconstruct the 64-bit replay
 * counter and notify the supplicant so it stays in sync.
 */
static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
						struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_gtk_offload_status_event *ev;
	struct ath11k_vif *arvif;
	__be64 replay_ctr_be;
	u64 replay_ctr;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch gtk offload status ev");
		kfree(tb);
		return;
	}

	/* arvif lookup is RCU protected; hold the read lock while using it */
	rcu_read_lock();

	arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
	if (!arvif) {
		ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
			    ev->vdev_id);
		goto exit;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
		   ev->refresh_cnt);
	ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
			NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);

	/* firmware reports the 64-bit counter as two 32-bit words */
	replay_ctr = ev->replay_ctr.word1;
	replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
	arvif->rekey_data.replay_ctr = replay_ctr;

	/* supplicant expects big-endian replay counter */
	replay_ctr_be = cpu_to_be64(replay_ctr);

	ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
				   (void *)&replay_ctr_be, GFP_ATOMIC);

exit:
	rcu_read_unlock();

	kfree(tb);
}
/* Top-level WMI event demultiplexer — HTC delivers every WMI event skb here.
 *
 * The WMI command header is stripped, the event id extracted and the payload
 * dispatched to the matching handler. All handlers consume the payload
 * synchronously and the skb is freed on exit, with one exception:
 * WMI_MGMT_RX_EVENTID, whose handler takes ownership of the skb (note the
 * early return for that case).
 */
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));

	trace_ath11k_wmi_event(ab, id, skb->data, skb->len);

	/* drop the WMI command header; bail out if the skb is too short */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	switch (id) {
		/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		ath11k_service_ready_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		ath11k_service_ready_ext_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		ath11k_service_ready_ext2_event(ab, skb);
		break;
	case WMI_REG_CHAN_LIST_CC_EVENTID:
		ath11k_reg_chan_list_event(ab, skb);
		break;
	case WMI_READY_EVENTID:
		ath11k_ready_event(ab, skb);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		ath11k_peer_delete_resp_event(ab, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath11k_vdev_start_resp_event(ab, skb);
		break;
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath11k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath11k_vdev_stopped_event(ab, skb);
		break;
	case WMI_MGMT_RX_EVENTID:
		ath11k_mgmt_rx_event(ab, skb);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath11k_mgmt_tx_compl_event(ab, skb);
		break;
	case WMI_SCAN_EVENTID:
		ath11k_scan_event(ab, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath11k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath11k_roam_event(ab, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath11k_chan_info_event(ab, skb);
		break;
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath11k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath11k_vdev_install_key_compl_event(ab, skb);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		ath11k_service_available_event(ab, skb);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath11k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath11k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath11k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath11k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath11k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath11k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
		ath11k_wmi_obss_color_collision_event(ab, skb);
		break;
	case WMI_TWT_ADD_DIALOG_EVENTID:
		ath11k_wmi_twt_add_dialog_event(ab, skb);
		break;
		/* add Unsupported events here */
	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
	case WMI_TWT_ENABLE_EVENTID:
	case WMI_TWT_DISABLE_EVENTID:
	case WMI_TWT_DEL_DIALOG_EVENTID:
	case WMI_TWT_PAUSE_DIALOG_EVENTID:
	case WMI_TWT_RESUME_DIALOG_EVENTID:
	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
	case WMI_PEER_CREATE_CONF_EVENTID:
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "ignoring unsupported event 0x%x\n", id);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath11k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath11k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_11D_NEW_COUNTRY_EVENTID:
		ath11k_reg_11d_new_cc_event(ab, skb);
		break;
	case WMI_DIAG_EVENTID:
		ath11k_wmi_diag_event(ab, skb);
		break;
	case WMI_PEER_STA_PS_STATECHG_EVENTID:
		ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath11k_wmi_gtk_offload_status_event(ab, skb);
		break;
	/* TODO: Add remaining events */
	default:
		ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
  6831. static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
  6832. u32 pdev_idx)
  6833. {
  6834. int status;
  6835. u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
  6836. ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
  6837. ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
  6838. struct ath11k_htc_svc_conn_req conn_req;
  6839. struct ath11k_htc_svc_conn_resp conn_resp;
  6840. memset(&conn_req, 0, sizeof(conn_req));
  6841. memset(&conn_resp, 0, sizeof(conn_resp));
  6842. /* these fields are the same for all service endpoints */
  6843. conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
  6844. conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
  6845. conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;
  6846. /* connect to control service */
  6847. conn_req.service_id = svc_id[pdev_idx];
  6848. status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
  6849. if (status) {
  6850. ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
  6851. status);
  6852. return status;
  6853. }
  6854. ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
  6855. ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
  6856. ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
  6857. init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
  6858. return 0;
  6859. }
  6860. static int
  6861. ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
  6862. struct wmi_unit_test_cmd ut_cmd,
  6863. u32 *test_args)
  6864. {
  6865. struct ath11k_pdev_wmi *wmi = ar->wmi;
  6866. struct wmi_unit_test_cmd *cmd;
  6867. struct sk_buff *skb;
  6868. struct wmi_tlv *tlv;
  6869. void *ptr;
  6870. u32 *ut_cmd_args;
  6871. int buf_len, arg_len;
  6872. int ret;
  6873. int i;
  6874. arg_len = sizeof(u32) * ut_cmd.num_args;
  6875. buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
  6876. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
  6877. if (!skb)
  6878. return -ENOMEM;
  6879. cmd = (struct wmi_unit_test_cmd *)skb->data;
  6880. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
  6881. FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);
  6882. cmd->vdev_id = ut_cmd.vdev_id;
  6883. cmd->module_id = ut_cmd.module_id;
  6884. cmd->num_args = ut_cmd.num_args;
  6885. cmd->diag_token = ut_cmd.diag_token;
  6886. ptr = skb->data + sizeof(ut_cmd);
  6887. tlv = ptr;
  6888. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
  6889. FIELD_PREP(WMI_TLV_LEN, arg_len);
  6890. ptr += TLV_HDR_SIZE;
  6891. ut_cmd_args = ptr;
  6892. for (i = 0; i < ut_cmd.num_args; i++)
  6893. ut_cmd_args[i] = test_args[i];
  6894. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
  6895. if (ret) {
  6896. ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
  6897. ret);
  6898. dev_kfree_skb(skb);
  6899. }
  6900. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  6901. "WMI unit test : module %d vdev %d n_args %d token %d\n",
  6902. cmd->module_id, cmd->vdev_id, cmd->num_args,
  6903. cmd->diag_token);
  6904. return ret;
  6905. }
  6906. int ath11k_wmi_simulate_radar(struct ath11k *ar)
  6907. {
  6908. struct ath11k_vif *arvif;
  6909. u32 dfs_args[DFS_MAX_TEST_ARGS];
  6910. struct wmi_unit_test_cmd wmi_ut;
  6911. bool arvif_found = false;
  6912. list_for_each_entry(arvif, &ar->arvifs, list) {
  6913. if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
  6914. arvif_found = true;
  6915. break;
  6916. }
  6917. }
  6918. if (!arvif_found)
  6919. return -EINVAL;
  6920. dfs_args[DFS_TEST_CMDID] = 0;
  6921. dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
  6922. /* Currently we could pass segment_id(b0 - b1), chirp(b2)
  6923. * freq offset (b3 - b10) to unit test. For simulation
  6924. * purpose this can be set to 0 which is valid.
  6925. */
  6926. dfs_args[DFS_TEST_RADAR_PARAM] = 0;
  6927. wmi_ut.vdev_id = arvif->vdev_id;
  6928. wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
  6929. wmi_ut.num_args = DFS_MAX_TEST_ARGS;
  6930. wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
  6931. ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");
  6932. return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
  6933. }
  6934. int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
  6935. struct ath11k_fw_dbglog *dbglog)
  6936. {
  6937. struct ath11k_pdev_wmi *wmi = ar->wmi;
  6938. struct wmi_debug_log_config_cmd_fixed_param *cmd;
  6939. struct sk_buff *skb;
  6940. struct wmi_tlv *tlv;
  6941. int ret, len;
  6942. len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
  6943. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  6944. if (!skb)
  6945. return -ENOMEM;
  6946. cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
  6947. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
  6948. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  6949. cmd->dbg_log_param = dbglog->param;
  6950. tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
  6951. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
  6952. FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
  6953. switch (dbglog->param) {
  6954. case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
  6955. case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
  6956. case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
  6957. case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
  6958. cmd->value = dbglog->value;
  6959. break;
  6960. case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
  6961. case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
  6962. cmd->value = dbglog->value;
  6963. memcpy(tlv->value, module_id_bitmap,
  6964. MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
  6965. /* clear current config to be used for next user config */
  6966. memset(module_id_bitmap, 0,
  6967. MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
  6968. break;
  6969. default:
  6970. dev_kfree_skb(skb);
  6971. return -EINVAL;
  6972. }
  6973. ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
  6974. if (ret) {
  6975. ath11k_warn(ar->ab,
  6976. "failed to send WMI_DBGLOG_CFG_CMDID\n");
  6977. dev_kfree_skb(skb);
  6978. }
  6979. return ret;
  6980. }
  6981. int ath11k_wmi_connect(struct ath11k_base *ab)
  6982. {
  6983. u32 i;
  6984. u8 wmi_ep_count;
  6985. wmi_ep_count = ab->htc.wmi_ep_count;
  6986. if (wmi_ep_count > ab->hw_params.max_radios)
  6987. return -1;
  6988. for (i = 0; i < wmi_ep_count; i++)
  6989. ath11k_connect_pdev_htc_service(ab, i);
  6990. return 0;
  6991. }
/* Per-pdev WMI teardown, counterpart of ath11k_wmi_pdev_attach().
 * Currently a stub: no per-pdev WMI resources are allocated yet, so only
 * the pdev id is sanity-checked.
 */
static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
{
	if (WARN_ON(pdev_id >= MAX_RADIOS))
		return;

	/* TODO: Deinit any pdev specific wmi resource */
}
  6998. int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
  6999. u8 pdev_id)
  7000. {
  7001. struct ath11k_pdev_wmi *wmi_handle;
  7002. if (pdev_id >= ab->hw_params.max_radios)
  7003. return -EINVAL;
  7004. wmi_handle = &ab->wmi_ab.wmi[pdev_id];
  7005. wmi_handle->wmi_ab = &ab->wmi_ab;
  7006. ab->wmi_ab.ab = ab;
  7007. /* TODO: Init remaining resource specific to pdev */
  7008. return 0;
  7009. }
  7010. int ath11k_wmi_attach(struct ath11k_base *ab)
  7011. {
  7012. int ret;
  7013. ret = ath11k_wmi_pdev_attach(ab, 0);
  7014. if (ret)
  7015. return ret;
  7016. ab->wmi_ab.ab = ab;
  7017. ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
  7018. /* It's overwritten when service_ext_ready is handled */
  7019. if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
  7020. ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
  7021. /* TODO: Init remaining wmi soc resources required */
  7022. init_completion(&ab->wmi_ab.service_ready);
  7023. init_completion(&ab->wmi_ab.unified_ready);
  7024. return 0;
  7025. }
  7026. void ath11k_wmi_detach(struct ath11k_base *ab)
  7027. {
  7028. int i;
  7029. /* TODO: Deinit wmi resource specific to SOC as required */
  7030. for (i = 0; i < ab->htc.wmi_ep_count; i++)
  7031. ath11k_wmi_pdev_detach(ab, i);
  7032. ath11k_wmi_free_dbring_caps(ab);
  7033. }
  7034. int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
  7035. u32 filter_bitmap, bool enable)
  7036. {
  7037. struct wmi_hw_data_filter_cmd *cmd;
  7038. struct sk_buff *skb;
  7039. int len;
  7040. len = sizeof(*cmd);
  7041. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  7042. if (!skb)
  7043. return -ENOMEM;
  7044. cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
  7045. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
  7046. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  7047. cmd->vdev_id = vdev_id;
  7048. cmd->enable = enable;
  7049. /* Set all modes in case of disable */
  7050. if (cmd->enable)
  7051. cmd->hw_filter_bitmap = filter_bitmap;
  7052. else
  7053. cmd->hw_filter_bitmap = ((u32)~0U);
  7054. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  7055. "wmi hw data filter enable %d filter_bitmap 0x%x\n",
  7056. enable, filter_bitmap);
  7057. return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
  7058. }
  7059. int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
  7060. {
  7061. struct wmi_wow_host_wakeup_ind *cmd;
  7062. struct sk_buff *skb;
  7063. size_t len;
  7064. len = sizeof(*cmd);
  7065. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  7066. if (!skb)
  7067. return -ENOMEM;
  7068. cmd = (struct wmi_wow_host_wakeup_ind *)skb->data;
  7069. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  7070. WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) |
  7071. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  7072. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
  7073. return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
  7074. }
  7075. int ath11k_wmi_wow_enable(struct ath11k *ar)
  7076. {
  7077. struct wmi_wow_enable_cmd *cmd;
  7078. struct sk_buff *skb;
  7079. int len;
  7080. len = sizeof(*cmd);
  7081. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  7082. if (!skb)
  7083. return -ENOMEM;
  7084. cmd = (struct wmi_wow_enable_cmd *)skb->data;
  7085. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) |
  7086. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  7087. cmd->enable = 1;
  7088. cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED;
  7089. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow enable\n");
  7090. return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
  7091. }
  7092. int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
  7093. const u8 mac_addr[ETH_ALEN])
  7094. {
  7095. struct sk_buff *skb;
  7096. struct wmi_scan_prob_req_oui_cmd *cmd;
  7097. u32 prob_req_oui;
  7098. int len;
  7099. prob_req_oui = (((u32)mac_addr[0]) << 16) |
  7100. (((u32)mac_addr[1]) << 8) | mac_addr[2];
  7101. len = sizeof(*cmd);
  7102. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  7103. if (!skb)
  7104. return -ENOMEM;
  7105. cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
  7106. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  7107. WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
  7108. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  7109. cmd->prob_req_oui = prob_req_oui;
  7110. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi scan prob req oui %d\n",
  7111. prob_req_oui);
  7112. return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
  7113. }
  7114. int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
  7115. enum wmi_wow_wakeup_event event,
  7116. u32 enable)
  7117. {
  7118. struct wmi_wow_add_del_event_cmd *cmd;
  7119. struct sk_buff *skb;
  7120. size_t len;
  7121. len = sizeof(*cmd);
  7122. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  7123. if (!skb)
  7124. return -ENOMEM;
  7125. cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
  7126. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
  7127. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  7128. cmd->vdev_id = vdev_id;
  7129. cmd->is_add = enable;
  7130. cmd->event_bitmap = (1 << event);
  7131. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
  7132. wow_wakeup_event(event), enable, vdev_id);
  7133. return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
  7134. }
/* Install a WoW bitmap wakeup pattern on @vdev_id.
 *
 * The firmware command is a fixed header followed by a strict sequence of
 * TLVs (bitmap pattern, then empty ipv4/ipv6/magic/timeout arrays, then a
 * one-word ratelimit array); the TLVs must appear in exactly this order
 * even when empty, which is why each placeholder is emitted explicitly.
 *
 * NOTE(review): pattern_len is copied into bitmap->patternbuf/bitmaskbuf
 * without a bounds check here — presumably callers guarantee it fits the
 * fixed-size pattern buffers; confirm against the callers.
 */
int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
			       const u8 *pattern, const u8 *mask,
			       int pattern_len, int pattern_offset)
{
	struct wmi_wow_add_pattern_cmd *cmd;
	struct wmi_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *ptr;
	size_t len;

	/* total frame: fixed cmd + every TLV listed above */
	len = sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*bitmap) +			/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	/* cmd */
	ptr = (u8 *)skb->data;
	cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_WOW_ADD_PATTERN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->pattern_id = pattern_id;
	cmd->pattern_type = WOW_BITMAP_PATTERN;

	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));

	ptr += sizeof(*tlv);

	bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
	bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					WMI_TAG_WOW_BITMAP_PATTERN_T) |
			     FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);

	/* CE byte-swap keeps the pattern bytes in the order firmware expects */
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
	ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
	bitmap->pattern_offset = pattern_offset;
	bitmap->pattern_len = pattern_len;
	bitmap->bitmask_len = pattern_len;
	bitmap->pattern_id = pattern_id;

	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(u32));

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}
  7218. int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
  7219. {
  7220. struct wmi_wow_del_pattern_cmd *cmd;
  7221. struct sk_buff *skb;
  7222. size_t len;
  7223. len = sizeof(*cmd);
  7224. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  7225. if (!skb)
  7226. return -ENOMEM;
  7227. cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
  7228. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
  7229. WMI_TAG_WOW_DEL_PATTERN_CMD) |
  7230. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  7231. cmd->vdev_id = vdev_id;
  7232. cmd->pattern_id = pattern_id;
  7233. cmd->pattern_type = WOW_BITMAP_PATTERN;
  7234. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
  7235. vdev_id, pattern_id);
  7236. return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
  7237. }
/* Build (but do not send) a WMI NLO-config command that starts PNO
 * (preferred network offload) scanning.
 *
 * Layout: fixed command struct, then an array-of-struct TLV holding one
 * nlo_configured_parameters per configured SSID, then an array-of-uint32
 * TLV holding the channel list of network 0.
 *
 * Returns the filled skb, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
				   u32 vdev_id,
				   struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 *channel_list;
	size_t len, nlo_list_len, channel_list_len;
	u8 *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	/* channel list is taken from network 0 only */
	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (u8 *)skb->data;
	cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	/* note: cmd uses pno->vdev_id; the vdev_id parameter only feeds the
	 * debug log below
	 */
	cmd->vdev_id = pno->vdev_id;
	cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = pno->active_max_time;
	cmd->passive_dwell_time = pno->passive_max_time;

	if (pno->do_passive_scan)
		cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;

	cmd->fast_scan_period = pno->fast_scan_period;
	cmd->slow_scan_period = pno->slow_scan_period;
	cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
	cmd->delay_start_time = pno->delay_start_time;

	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
			      WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
		/* swap 8 bytes: the 6-byte MAC padded to the CE word size */
		ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
		ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = pno->uc_networks_count;

	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
	ptr += sizeof(*tlv);

	nlo_list = (struct nlo_configured_parameters *)ptr;
	for (i = 0; i < cmd->no_of_ssids; i++) {
		/* each list element carries its own inner TLV header */
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
			      FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));

		nlo_list[i].ssid.valid = true;
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       nlo_list[i].ssid.ssid.ssid_len);
		ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
				    roundup(nlo_list[i].ssid.ssid.ssid_len, 4));

		/* rssi_threshold is in units of 0.1 dBm here; values at or
		 * below -30 dBm (-300) are treated as "no threshold"
		 */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = true;
			nlo_list[i].rssi_cond.rssi =
				pno->a_networks[i].rssi_threshold;
		}

		nlo_list[i].bcast_nw_type.valid = true;
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			pno->a_networks[i].bcast_nw_type;
	}

	ptr += nlo_list_len;
	cmd->num_of_channels = pno->a_networks[0].channel_count;
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, channel_list_len);
	ptr += sizeof(*tlv);
	channel_list = (u32 *)ptr;
	for (i = 0; i < cmd->num_of_channels; i++)
		channel_list[i] = pno->a_networks[0].channels[i];

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}
  7331. static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
  7332. u32 vdev_id)
  7333. {
  7334. struct wmi_wow_nlo_config_cmd *cmd;
  7335. struct sk_buff *skb;
  7336. size_t len;
  7337. len = sizeof(*cmd);
  7338. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  7339. if (!skb)
  7340. return ERR_PTR(-ENOMEM);
  7341. cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
  7342. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
  7343. FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
  7344. cmd->vdev_id = vdev_id;
  7345. cmd->flags = WMI_NLO_CONFIG_STOP;
  7346. ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
  7347. "wmi tlv stop pno config vdev_id %d\n", vdev_id);
  7348. return skb;
  7349. }
  7350. int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
  7351. struct wmi_pno_scan_req *pno_scan)
  7352. {
  7353. struct sk_buff *skb;
  7354. if (pno_scan->enable)
  7355. skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
  7356. else
  7357. skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
  7358. if (IS_ERR_OR_NULL(skb))
  7359. return -ENOMEM;
  7360. return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
  7361. }
/* Emit one IPv6 NS (neighbor solicitation) offload tuple TLV array into the
 * command buffer at *ptr and advance *ptr past everything written.
 *
 * @ext: false writes the first, fixed-size array of WMI_MAX_NS_OFFLOADS
 *       tuples; true writes the "extended" array covering tuples beyond
 *       WMI_MAX_NS_OFFLOADS (caller only requests this when
 *       offload->ipv6_count > WMI_MAX_NS_OFFLOADS).
 * @enable: when false, tuples are emitted without WMI_NSOL_FLAGS_VALID,
 *          which disables the offload in firmware.
 */
static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
				       struct ath11k_arp_ns_offload *offload,
				       u8 **ptr,
				       bool enable,
				       bool ext)
{
	struct wmi_ns_offload_tuple *ns;
	struct wmi_tlv *tlv;
	u8 *buf_ptr = *ptr;
	u32 ns_cnt, ns_ext_tuples;
	int i, max_offloads;

	ns_cnt = offload->ipv6_count;

	tlv  = (struct wmi_tlv *)buf_ptr;

	if (ext) {
		/* extended array: only the tuples past the fixed array */
		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
		i = WMI_MAX_NS_OFFLOADS;
		max_offloads = offload->ipv6_count;
	} else {
		/* fixed array: always WMI_MAX_NS_OFFLOADS entries */
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
		i = 0;
		max_offloads = WMI_MAX_NS_OFFLOADS;
	}

	buf_ptr += sizeof(*tlv);

	for (; i < max_offloads; i++) {
		ns = (struct wmi_ns_offload_tuple *)buf_ptr;
		ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
				 FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);

		if (enable) {
			/* only entries with a configured address are valid;
			 * the rest are emitted as placeholders
			 */
			if (i < ns_cnt)
				ns->flags |= WMI_NSOL_FLAGS_VALID;

			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
			ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
			ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);

			if (offload->ipv6_type[i])
				ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;

			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
			/* swap 8 bytes: the 6-byte MAC padded to the CE word size */
			ath11k_ce_byte_swap(ns->target_mac.addr, 8);

			if (ns->target_mac.word0 != 0 ||
			    ns->target_mac.word1 != 0) {
				ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
			}

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
				   "wmi index %d ns_solicited %pI6 target %pI6",
				   i, ns->solicitation_ipaddr,
				   ns->target_ipaddr[0]);
		}

		buf_ptr += sizeof(*ns);
	}

	*ptr = buf_ptr;
}
/* Emit the ARP offload tuple TLV array (always WMI_MAX_ARP_OFFLOADS
 * entries) into the command buffer at *ptr and advance *ptr past it.
 *
 * @enable: when false, every tuple is emitted without WMI_ARPOL_FLAGS_VALID,
 *          which disables ARP offloading in firmware.
 */
static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
					struct ath11k_arp_ns_offload *offload,
					u8 **ptr,
					bool enable)
{
	struct wmi_arp_offload_tuple *arp;
	struct wmi_tlv *tlv;
	u8 *buf_ptr = *ptr;
	int i;

	/* fill arp tuple */
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
	buf_ptr += sizeof(*tlv);

	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
		arp = (struct wmi_arp_offload_tuple *)buf_ptr;
		arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
				  FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);

		if (enable && i < offload->ipv4_count) {
			/* Copy the target ip addr and flags */
			arp->flags = WMI_ARPOL_FLAGS_VALID;
			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
			ath11k_ce_byte_swap(arp->target_ipaddr, 4);
			ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi arp offload address %pI4",
				   arp->target_ipaddr);
		}

		buf_ptr += sizeof(*arp);
	}

	*ptr = buf_ptr;
}
/* Program (or clear) the ARP and IPv6-NS offload tables for a vif.
 *
 * Command layout: fixed command struct, the fixed NS tuple TLV array, the
 * ARP tuple TLV array and, only when more than WMI_MAX_NS_OFFLOADS IPv6
 * addresses are configured, a trailing extended NS tuple array covering the
 * overflow. The length computation below must mirror exactly what
 * ath11k_wmi_fill_ns_offload()/ath11k_wmi_fill_arp_offload() write.
 *
 * @enable: true installs the offloads from arvif->arp_ns_offload; false
 *          emits invalid tuples, i.e. disables offloading.
 *
 * Returns 0 on success or a negative errno.
 */
int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
			      struct ath11k_vif *arvif, bool enable)
{
	struct ath11k_arp_ns_offload *offload;
	struct wmi_set_arp_ns_offload_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	size_t len;
	u8 ns_cnt, ns_ext_tuples = 0;

	offload = &arvif->arp_ns_offload;
	ns_cnt = offload->ipv6_count;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
	      sizeof(*tlv) +
	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);

	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
		/* extra TLV array for the NS tuples past the fixed array */
		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
		len += sizeof(*tlv) +
		       ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
	}

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	buf_ptr = skb->data;
	cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->flags = 0;
	cmd->vdev_id = arvif->vdev_id;
	cmd->num_ns_ext_tuples = ns_ext_tuples;

	buf_ptr += sizeof(*cmd);

	/* each fill helper advances buf_ptr past what it wrote */
	ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
	ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);

	if (ns_ext_tuples)
		ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}
/* Enable or disable firmware GTK rekey offload for a vif, using the key
 * material cached in arvif->rekey_data.
 *
 * Returns 0 on success or a negative errno.
 */
int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
				 struct ath11k_vif *arvif, bool enable)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
	int len;
	struct sk_buff *skb;
	__le64 replay_ctr;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arvif->vdev_id;

	if (enable) {
		cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;

		/* the length in rekey_data and cmd is equal */
		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
		/* NOTE(review): the kck swap uses GTK_OFFLOAD_KEK_BYTES, not
		 * a KCK-specific length — presumably the two lengths are
		 * equal here; confirm against the header definitions.
		 */
		ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
		ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
		memcpy(cmd->replay_ctr, &replay_ctr,
		       sizeof(replay_ctr));
		ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
	} else {
		cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
		   arvif->vdev_id, enable);
	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}
  7520. int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
  7521. struct ath11k_vif *arvif)
  7522. {
  7523. struct wmi_gtk_rekey_offload_cmd *cmd;
  7524. int len;
  7525. struct sk_buff *skb;
  7526. len = sizeof(*cmd);
  7527. skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
  7528. if (!skb)
  7529. return -ENOMEM;
  7530. cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
  7531. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
  7532. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  7533. cmd->vdev_id = arvif->vdev_id;
  7534. cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
  7535. ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
  7536. arvif->vdev_id);
  7537. return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
  7538. }
/* Push the BIOS-provided SAR power table to firmware.
 *
 * Layout: fixed command struct, a byte-array TLV carrying the
 * BIOS_SAR_TABLE_LEN table (padded up to a u32 multiple), and a second
 * byte-array TLV of reserved bytes that is left zeroed.
 *
 * @sar_val: BIOS_SAR_TABLE_LEN bytes of SAR table data.
 *
 * Returns 0 on success or a negative errno.
 */
int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
{	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_set_sar_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	u32 len, sar_len_aligned, rsvd_len_aligned;

	/* TLV payloads must be 32-bit aligned */
	sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
	rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + sar_len_aligned +
	      TLV_HDR_SIZE + rsvd_len_aligned;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = ar->pdev->pdev_id;
	/* lengths advertised to firmware are the unpadded sizes */
	cmd->sar_len = BIOS_SAR_TABLE_LEN;
	cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);

	/* reserved byte-array TLV: header only, payload stays zeroed */
	buf_ptr += sar_len_aligned;
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);

	return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
}
  7572. int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
  7573. {
  7574. struct ath11k_pdev_wmi *wmi = ar->wmi;
  7575. struct wmi_pdev_set_geo_table_cmd *cmd;
  7576. struct wmi_tlv *tlv;
  7577. struct sk_buff *skb;
  7578. u8 *buf_ptr;
  7579. u32 len, rsvd_len_aligned;
  7580. rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
  7581. len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
  7582. skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
  7583. if (!skb)
  7584. return -ENOMEM;
  7585. cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
  7586. cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
  7587. FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
  7588. cmd->pdev_id = ar->pdev->pdev_id;
  7589. cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
  7590. buf_ptr = skb->data + sizeof(*cmd);
  7591. tlv = (struct wmi_tlv *)buf_ptr;
  7592. tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
  7593. FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
  7594. return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
  7595. }
/* Configure the firmware STA keepalive mechanism for a vdev.
 *
 * The command always carries an ARP-response TLV after the fixed struct;
 * its address fields are filled only for the ARP-based keepalive methods.
 *
 * Returns 0 on success or a negative errno.
 */
int ath11k_wmi_sta_keepalive(struct ath11k *ar,
			     const struct wmi_sta_keepalive_arg *arg)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_sta_keepalive_cmd *cmd;
	struct wmi_sta_keepalive_arp_resp *arp;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd) + sizeof(*arp);
	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_STA_KEEPALIVE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->enabled = arg->enabled;
	cmd->interval = arg->interval;
	cmd->method = arg->method;

	/* ARP response TLV immediately follows the command struct */
	arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
	arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);

	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
		arp->src_ip4_addr = arg->src_ip4_addr;
		arp->dest_ip4_addr = arg->dest_ip4_addr;
		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);

	return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}