// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/version.h>
#include <linux/sched.h>

#include "main.h"
#include "bus.h"
#include "debug.h"
#include "pci.h"
#include "pci_platform.h"
#include "reg.h"

#define PCI_LINK_UP 1
#define PCI_LINK_DOWN 0

#define SAVE_PCI_CONFIG_SPACE 1
#define RESTORE_PCI_CONFIG_SPACE 0

#define PCI_BAR_NUM 0
#define PCI_INVALID_READ(val) ((val) == U32_MAX)

#define PCI_DMA_MASK_32_BIT DMA_BIT_MASK(32)
#define PCI_DMA_MASK_36_BIT DMA_BIT_MASK(36)
#define PCI_DMA_MASK_64_BIT DMA_BIT_MASK(64)

#define MHI_NODE_NAME "qcom,mhi"
#define MHI_MSI_NAME "MHI"

#define QCA6390_PATH_PREFIX "qca6390/"
#define QCA6490_PATH_PREFIX "qca6490/"
#define QCN7605_PATH_PREFIX "qcn7605/"
#define KIWI_PATH_PREFIX "kiwi/"
#define MANGO_PATH_PREFIX "mango/"
#define PEACH_PATH_PREFIX "peach/"
#define DEFAULT_PHY_M3_FILE_NAME "m3.bin"
#define DEFAULT_AUX_FILE_NAME "aux_ucode.elf"
#define DEFAULT_PHY_UCODE_FILE_NAME "phy_ucode.elf"
#define TME_PATCH_FILE_NAME "tmel_patch.elf"
#define PHY_UCODE_V2_FILE_NAME "phy_ucode20.elf"
#define DEFAULT_FW_FILE_NAME "amss.bin"
#define FW_V2_FILE_NAME "amss20.bin"
#define FW_V2_FTM_FILE_NAME "amss20_ftm.bin"
#define DEVICE_MAJOR_VERSION_MASK 0xF

#define WAKE_MSI_NAME "WAKE"

#define DEV_RDDM_TIMEOUT 5000
#define WAKE_EVENT_TIMEOUT 5000

#ifdef CONFIG_CNSS_EMULATION
#define EMULATION_HW 1
#else
#define EMULATION_HW 0
#endif

#define RAMDUMP_SIZE_DEFAULT 0x420000
#define CNSS_256KB_SIZE 0x40000
#define DEVICE_RDDM_COOKIE 0xCAFECACE

static bool cnss_driver_registered;

static DEFINE_SPINLOCK(pci_link_down_lock);
static DEFINE_SPINLOCK(pci_reg_window_lock);
static DEFINE_SPINLOCK(time_sync_lock);

#define MHI_TIMEOUT_OVERWRITE_MS (plat_priv->ctrl_params.mhi_timeout)
#define MHI_M2_TIMEOUT_MS (plat_priv->ctrl_params.mhi_m2_timeout)

#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US 1000
#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US 2000

#define FORCE_WAKE_DELAY_MIN_US 4000
#define FORCE_WAKE_DELAY_MAX_US 6000
#define FORCE_WAKE_DELAY_TIMEOUT_US 60000

#define REG_RETRY_MAX_TIMES 3

#define MHI_SUSPEND_RETRY_MAX_TIMES 3
#define MHI_SUSPEND_RETRY_DELAY_US 5000

#define BOOT_DEBUG_TIMEOUT_MS 7000

#define HANG_DATA_LENGTH 384
#define HST_HANG_DATA_OFFSET ((3 * 1024 * 1024) - HANG_DATA_LENGTH)
#define HSP_HANG_DATA_OFFSET ((2 * 1024 * 1024) - HANG_DATA_LENGTH)

#define AFC_SLOT_SIZE 0x1000
#define AFC_MAX_SLOT 2
#define AFC_MEM_SIZE (AFC_SLOT_SIZE * AFC_MAX_SLOT)
#define AFC_AUTH_STATUS_OFFSET 1
#define AFC_AUTH_SUCCESS 1
#define AFC_AUTH_ERROR 0
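
/*
 * Default MHI channel table. Channels come in host->device/device->host
 * pairs: LOOPBACK (0/1), DIAG (4/5) and IPCR (20/21). The ADSP_* offload
 * ("satellite") channels are compiled in only with CONFIG_MHI_SATELLITE
 * and must stay at the end of the array so cnss_mhi_config_no_satellite
 * can drop them by shrinking num_channels.
 */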
static const struct mhi_channel_config cnss_mhi_channels[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 21,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
	/* All MHI satellite config to be at the end of data struct */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num = 50,
		.name = "ADSP_0",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 51,
		.name = "ADSP_1",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 70,
		.name = "ADSP_2",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 71,
		.name = "ADSP_3",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
#endif
};
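
/*
 * Channel table for Genoa targets: the same LOOPBACK/DIAG pairs, but IPCR
 * rides on channels 16/17 instead of 20/21, and there are no satellite
 * channels.
 */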
static const struct mhi_channel_config cnss_mhi_channels_genoa[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 16,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 17,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
};
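
/*
 * MHI event ring layout: ring 0 carries control events, ring 1 data
 * events. CONFIG_MHI_BUS_MISC adds a bandwidth-scale ring and
 * CONFIG_MHI_SATELLITE a client-managed offload data ring. On kernels
 * >= 5.12 the array is left non-const to match what the MHI core expects.
 */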
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
static struct mhi_event_config cnss_mhi_events[] = {
#else
static const struct mhi_event_config cnss_mhi_events[] = {
#endif
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
		.priority = 0,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.priority = 1,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_BW_SCALE,
		.priority = 2,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#endif
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_DATA,
		.priority = 1,
		.hardware_event = false,
		.client_managed = true,
		.offload_channel = true,
	},
#endif
};
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
#define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
#define CNSS_MHI_SATELLITE_EVT_COUNT 1
#else
#define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
#define CNSS_MHI_SATELLITE_EVT_COUNT 0
#endif
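
/*
 * Controller configs built from the tables above: the default config, a
 * Genoa config with its own BHIe offset, and a fallback that trims the
 * trailing satellite channel/event entries from the default tables.
 */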
static const struct mhi_controller_config cnss_mhi_config_default = {
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	.max_channels = 72,
#else
	.max_channels = 32,
#endif
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events),
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};

static const struct mhi_controller_config cnss_mhi_config_genoa = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels_genoa),
	.ch_cfg = cnss_mhi_channels_genoa,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
		CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
	.bhie_offset = 0x0324,
};

static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
		CNSS_MHI_SATELLITE_CH_CFG_COUNT,
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
		CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};
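
/*
 * NULL-terminated name/offset tables used by the debug register dumps:
 * Copy Engine source/destination rings, CE common status, QDSS CSRs and
 * PCIe scratch registers.
 */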
static struct cnss_pci_reg ce_src[] = {
	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
	{ NULL },
};

static struct cnss_pci_reg ce_dst[] = {
	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
	{ NULL },
};

static struct cnss_pci_reg ce_cmn[] = {
	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
	{ NULL },
};

static struct cnss_pci_reg qdss_csr[] = {
	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
	{ NULL },
};

static struct cnss_pci_reg pci_scratch[] = {
	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
	{ NULL },
};
/* First field of the structure is the device bit mask. Use
 * enum cnss_pci_reg_mask as reference for the value.
 */
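/* Note: the second field appears to select the access type: non-zero
 * entries write the given value, zero entries read the register back
 * (an assumption, based on how the sequences below pair writes of
 * test-bus selects with reads of test-bus values).
 */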
static struct cnss_misc_reg wcss_reg_access_seq[] = {
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
};
static struct cnss_misc_reg pcie_reg_access_seq[] = {
	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
};
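
/*
 * WLAON register dump sequence; the device masks (2 and 3, per
 * enum cnss_pci_reg_mask) gate which entries apply to a given chip.
 */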
static struct cnss_misc_reg wlaon_reg_access_seq[] = {
	{3, 0, WLAON_SOC_POWER_CTRL, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
	{3, 0, WLAON_SW_COLD_RESET, 0},
	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
	{3, 0, WLAON_DLY_CONFIG, 0},
	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
	{3, 0, WLAON_WARM_SW_ENTRY, 0},
	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
	{3, 0, WLAON_DEBUG, 0},
	{3, 0, WLAON_SOC_PARAMETERS, 0},
	{3, 0, WLAON_WLPM_SIGNAL, 0},
	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
	{3, 0, WLAON_PBL_STACK_CANARY, 0},
	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
	{3, 0, WLAON_WL_AON_SPARE2, 0},
	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
	{3, 0, WLAON_INTR_STATUS, 0},
	{2, 0, WLAON_INTR_ENABLE, 0},
	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
	{2, 0, WLAON_DBG_STATUS0, 0},
	{2, 0, WLAON_DBG_STATUS1, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
};
static struct cnss_misc_reg syspm_reg_access_seq[] = {
	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
};
static struct cnss_print_optimize print_optimize;

#define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
#define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
#define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
#define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)

static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev);
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev);
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
}

static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
	mhi_dump_sfr(pci_priv->mhi_ctrl);
}

static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
}

static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
}

static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
}

static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
				       u32 timeout)
{
	return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
}

static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
					  timeout_us, in_panic);
}

#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
{
	return mhi_host_notify_db_disable_trace(pci_priv->mhi_ctrl);
}
#endif

static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
	mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
}

static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return mhi_force_reset(pci_priv->mhi_ctrl);
}

void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
				  phys_addr_t base)
{
	return mhi_controller_set_base(pci_priv->mhi_ctrl, base);
}
#else
static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
}

static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
}

static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return false;
}

static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return -EOPNOTSUPP;
}

static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return -EOPNOTSUPP;
}

static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
				       u32 timeout)
{
}

static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return -EOPNOTSUPP;
}

#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}
#endif

static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
}

static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}

void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
				  phys_addr_t base)
{
}
#endif /* CONFIG_MHI_BUS_MISC */
#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
#define CNSS_MHI_WAKE_TIMEOUT		500000

static void cnss_record_smmu_fault_timestamp(struct cnss_pci_data *pci_priv,
					     enum cnss_smmu_fault_time id)
{
	if (id >= SMMU_CB_MAX)
		return;

	pci_priv->smmu_fault_timestamp[id] = sched_clock();
}

static void cnss_pci_smmu_fault_handler_irq(struct iommu_domain *domain,
					    void *handler_token)
{
	struct cnss_pci_data *pci_priv = handler_token;
	int ret = 0;

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_ENTRY);
	ret = cnss_mhi_device_get_sync_atomic(pci_priv,
					      CNSS_MHI_WAKE_TIMEOUT, true);
	if (ret < 0) {
		cnss_pr_err("Failed to bring mhi in M0 state, ret %d\n", ret);
		return;
	}

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_DOORBELL_RING);
	ret = cnss_mhi_host_notify_db_disable_trace(pci_priv);
	if (ret < 0)
		cnss_pr_err("Failed to notify wlan fw to stop trace collection, ret %d\n", ret);

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_EXIT);
}

void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
{
	qcom_iommu_set_fault_handler_irq(pci_priv->iommu_domain,
					 cnss_pci_smmu_fault_handler_irq,
					 pci_priv);
}
#else
void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
{
}
#endif
int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
{
	u16 device_id;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
			    (void *)_RET_IP_);
		return -EACCES;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
		return -EIO;
	}

	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
	if (device_id != pci_priv->device_id) {
		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
			       (void *)_RET_IP_, device_id,
			       pci_priv->device_id);
		return -EIO;
	}

	return 0;
}
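
/* BAR accesses beyond MAX_UNWINDOWED_ADDRESS go through a sliding remap
 * window: the upper bits of the register offset (offset >> WINDOW_SHIFT)
 * select the window via the remap BAR control register, and the low bits
 * (offset & WINDOW_RANGE_MASK) are then addressed relative to WINDOW_START.
 * The control register is read back to confirm the window switch took
 * effect before callers dereference the windowed range.
 */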
static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
	u32 window_enable = WINDOW_ENABLE_BIT | window;
	u32 val;

	if (plat_priv->device_id == PEACH_DEVICE_ID) {
		writel_relaxed(window_enable, pci_priv->bar +
			       PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
	} else {
		writel_relaxed(window_enable, pci_priv->bar +
			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	}

	if (plat_priv->device_id == QCN7605_DEVICE_ID)
		window_enable = QCN7605_WINDOW_ENABLE_BIT | window;

	if (window != pci_priv->remap_window) {
		pci_priv->remap_window = window;
		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
			    window_enable);
	}

	/* Read it back to make sure the write has taken effect */
	if (plat_priv->device_id == PEACH_DEVICE_ID) {
		val = readl_relaxed(pci_priv->bar +
				    PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
	} else {
		val = readl_relaxed(pci_priv->bar +
				    QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	}

	if (val != window_enable) {
		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
			    window_enable, val);
		if (!cnss_pci_check_link_status(pci_priv) &&
		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
			CNSS_ASSERT(0);
	}
}
static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
			     u32 offset, u32 *val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		*val = readl_relaxed(pci_priv->bar + offset);
		return 0;
	}

	/* If in panic, the kernel panic handler is assumed to have stopped
	 * all other threads and interrupts, and pci_reg_window_lock could
	 * already have been held before the panic. So only take the lock
	 * during normal operation.
	 */
	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
		cnss_pci_select_window(pci_priv, offset);
		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
				     (offset & WINDOW_RANGE_MASK));
	} else {
		spin_lock_bh(&pci_reg_window_lock);
		cnss_pci_select_window(pci_priv, offset);
		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
				     (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&pci_reg_window_lock);
	}

	return 0;
}

static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
			      u32 val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		writel_relaxed(val, pci_priv->bar + offset);
		return 0;
	}

	/* Same constraint as PCI register read in panic */
	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
		cnss_pci_select_window(pci_priv, offset);
		writel_relaxed(val, pci_priv->bar + WINDOW_START +
			       (offset & WINDOW_RANGE_MASK));
	} else {
		spin_lock_bh(&pci_reg_window_lock);
		cnss_pci_select_window(pci_priv, offset);
		writel_relaxed(val, pci_priv->bar + WINDOW_START +
			       (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&pci_reg_window_lock);
	}

	return 0;
}
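
/* Force wake keeps the device out of its low power states (M2/M3) so that
 * register space stays accessible: cnss_pci_force_wake_get() below blocks
 * for up to FORCE_WAKE_DELAY_TIMEOUT_US for the wake request to complete
 * and must be balanced with cnss_pci_force_wake_put().
 */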
static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
{
	struct device *dev = &pci_priv->pci_dev->dev;
	int ret;

	ret = cnss_pci_force_wake_request_sync(dev,
					       FORCE_WAKE_DELAY_TIMEOUT_US);
	if (ret) {
		if (ret != -EAGAIN)
			cnss_pr_err("Failed to request force wake\n");
		return ret;
	}

	/* If the device's M1 state-change event races here, it can be
	 * ignored, as the device is expected to move immediately from M2
	 * to M0 without entering a low power state.
	 */
	if (cnss_pci_is_device_awake(dev) != true)
		cnss_pr_warn("MHI not in M0, while reg still accessible\n");

	return 0;
}

static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
{
	struct device *dev = &pci_priv->pci_dev->dev;
	int ret;

	ret = cnss_pci_force_wake_release(dev);
	if (ret && ret != -EAGAIN)
		cnss_pr_err("Failed to release force wake\n");

	return ret;
}
#if IS_ENABLED(CONFIG_INTERCONNECT)
/**
 * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
 * @plat_priv: Platform private data struct
 * @bw: bandwidth level to vote for
 * @save: whether to save @bw to current_bw_vote
 *
 * Setup bandwidth votes for configured interconnect paths
 *
 * Return: 0 for success
 */
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	int ret = 0;
	struct cnss_bus_bw_info *bus_bw_info;

	if (!plat_priv->icc.path_count)
		return -EOPNOTSUPP;

	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
		cnss_pr_err("Invalid bus bandwidth Type: %d", bw);
		return -EINVAL;
	}

	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);

	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
		ret = icc_set_bw(bus_bw_info->icc_path,
				 bus_bw_info->cfg_table[bw].avg_bw,
				 bus_bw_info->cfg_table[bw].peak_bw);
		if (ret) {
			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
				    bw, ret, bus_bw_info->icc_name,
				    bus_bw_info->cfg_table[bw].avg_bw,
				    bus_bw_info->cfg_table[bw].peak_bw);
			break;
		}
	}
	if (ret == 0 && save)
		plat_priv->icc.current_bw_vote = bw;

	return ret;
}

int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);

	if (!plat_priv)
		return -ENODEV;

	if (bandwidth < 0)
		return -EINVAL;

	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
}
#else
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	return 0;
}

int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	return 0;
}
#endif
EXPORT_SYMBOL(cnss_request_bus_bandwidth);
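
/* Debug register accessors: unless raw_access is requested, the access is
 * bracketed by a runtime PM get and a force wake request so the device is
 * powered and in M0 before the windowed read/write, and both references
 * are dropped on the way out regardless of the result.
 */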
int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
			    u32 *val, bool raw_access)
{
	int ret = 0;
	bool do_force_wake_put = true;

	if (raw_access) {
		ret = cnss_pci_reg_read(pci_priv, offset, val);
		goto out;
	}

	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
	if (ret)
		goto out;

	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
	if (ret < 0)
		goto runtime_pm_put;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		do_force_wake_put = false;

	ret = cnss_pci_reg_read(pci_priv, offset, val);
	if (ret) {
		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
			    offset, ret);
		goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
out:
	return ret;
}

int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
			     u32 val, bool raw_access)
{
	int ret = 0;
	bool do_force_wake_put = true;

	if (raw_access) {
		ret = cnss_pci_reg_write(pci_priv, offset, val);
		goto out;
	}

	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
	if (ret)
		goto out;

	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
	if (ret < 0)
		goto runtime_pm_put;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		do_force_wake_put = false;

	ret = cnss_pci_reg_write(pci_priv, offset, val);
	if (ret) {
		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
			    val, offset, ret);
		goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
out:
	return ret;
}
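
/* Saving/restoring PCI config space is skipped when the link is down or
 * recovery is in progress, since the saved state would be stale; restore
 * then falls back to the default state captured earlier instead.
 */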
static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	bool link_down_or_recovery;

	if (!plat_priv)
		return -ENODEV;

	link_down_or_recovery = pci_priv->pci_link_down_ind ||
		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));

	if (save) {
		if (link_down_or_recovery) {
			pci_priv->saved_state = NULL;
		} else {
			pci_save_state(pci_dev);
			pci_priv->saved_state = pci_store_saved_state(pci_dev);
		}
	} else {
		if (link_down_or_recovery) {
			pci_load_saved_state(pci_dev, pci_priv->default_state);
			pci_restore_state(pci_dev);
		} else if (pci_priv->saved_state) {
			pci_load_and_free_saved_state(pci_dev,
						      &pci_priv->saved_state);
			pci_restore_state(pci_dev);
		}
	}

	return 0;
}
static int cnss_update_supported_link_info(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *root_port;
	struct device_node *root_of_node;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -EINVAL;

	if (pci_priv->device_id != KIWI_DEVICE_ID)
		return ret;

	plat_priv = pci_priv->plat_priv;
	root_port = pcie_find_root_port(pci_priv->pci_dev);

	if (!root_port) {
		cnss_pr_err("PCIe root port is null\n");
		return -EINVAL;
	}

	root_of_node = root_port->dev.of_node;
	if (root_of_node && root_of_node->parent) {
		ret = of_property_read_u32(root_of_node->parent,
					   "qcom,target-link-speed",
					   &plat_priv->supported_link_speed);
		if (!ret)
			cnss_pr_dbg("Supported PCIe Link Speed: %d\n",
				    plat_priv->supported_link_speed);
		else
			plat_priv->supported_link_speed = 0;
	}

	return ret;
}

static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
{
	u16 link_status;
	int ret;

	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
					&link_status);
	if (ret)
		return ret;

	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);

	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
	pci_priv->def_link_width =
		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
	pci_priv->cur_link_speed = pci_priv->def_link_speed;

	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
		    pci_priv->def_link_speed, pci_priv->def_link_width);

	return 0;
}

static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
{
	u32 reg_offset, val;
	int i;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump SOC Scratch registers\n");

	for (i = 0; pci_scratch[i].name; i++) {
		reg_offset = pci_scratch[i].offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
			return;
		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
			    pci_scratch[i].name, val);
	}
}
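
/* L2 suspend sequence: stop bus mastering, save config space, disable the
 * device and move it to D3hot before asking the root complex to take the
 * link down. cnss_resume_pci_link() below performs the mirror image in
 * reverse order.
 */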
int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_info("PCI link is already suspended\n");
		goto out;
	}

	pci_clear_master(pci_priv->pci_dev);

	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
	if (ret)
		goto out;

	pci_disable_device(pci_priv->pci_dev);

	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
		if (ret)
			cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
	}

	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
	pci_priv->drv_connected_last = 0;

	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
	if (ret)
		goto out;

	pci_priv->pci_link_state = PCI_LINK_DOWN;

	return 0;
out:
	return ret;
}
int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	if (pci_priv->pci_link_state == PCI_LINK_UP) {
		cnss_pr_info("PCI link is already resumed\n");
		goto out;
	}

	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
	if (ret) {
		ret = -EAGAIN;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;

	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
		if (ret) {
			cnss_pr_err("Failed to set D0, err = %d\n", ret);
			goto out;
		}
	}

	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
	if (ret)
		goto out;

	ret = pci_enable_device(pci_priv->pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
		goto out;
	}

	pci_set_master(pci_priv->pci_dev);

	if (pci_priv->pci_link_down_ind)
		pci_priv->pci_link_down_ind = false;

	return 0;
out:
	return ret;
}

int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
{
	int ret;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Always wait here to avoid missing WAKE assert for RDDM
	 * before link recovery
	 */
	msleep(WAKE_EVENT_TIMEOUT);

	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		del_timer(&pci_priv->dev_rddm_timer);
		return ret;
	}

	mod_timer(&pci_priv->dev_rddm_timer,
		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));

	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);

	return 0;
}

static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
				       enum cnss_bus_event_type type,
				       void *data)
{
	struct cnss_bus_event bus_event;

	bus_event.etype = type;
	bus_event.event_data = data;
	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
}
void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	unsigned long flags;

	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
		     &plat_priv->ctrl_params.quirks))
		panic("cnss: PCI link is down\n");

	spin_lock_irqsave(&pci_link_down_lock, flags);
	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
		spin_unlock_irqrestore(&pci_link_down_lock, flags);
		return;
	}
	pci_priv->pci_link_down_ind = true;
	spin_unlock_irqrestore(&pci_link_down_lock, flags);

	if (pci_priv->mhi_ctrl) {
		/* Notify MHI about link down */
		mhi_report_error(pci_priv->mhi_ctrl);
	}

	if (pci_dev->device == QCA6174_DEVICE_ID)
		disable_irq(pci_dev->irq);

	/* Notify the bus-related event. For now this covers all supported
	 * chips, and only the PCIe LINK_DOWN notification is handled here;
	 * the uevent buffer can be extended later to carry more bus info.
	 */
	cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);

	cnss_fatal_err("PCI link down, schedule recovery\n");
	cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
}
int cnss_pci_link_down(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv = NULL;
	int ret;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -EINVAL;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is already in progress\n");
		return -EBUSY;
	}

	if (pci_priv->drv_connected_last &&
	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
				  "cnss-enable-self-recovery"))
		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);

	cnss_pr_err("PCI link down is detected by drivers\n");

	ret = cnss_pci_assert_perst(pci_priv);
	if (ret)
		cnss_pci_handle_linkdown(pci_priv);

	return ret;
}
EXPORT_SYMBOL(cnss_pci_link_down);

int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended(D3)\n");
		return -EACCES;
	}

	cnss_pr_dbg("Start to get PCIe reg dump\n");

	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
}
EXPORT_SYMBOL(cnss_pci_get_reg_dump);

int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
		pci_priv->pci_link_down_ind;
}

int cnss_pci_is_device_down(struct device *dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));

	return cnss_pcie_is_device_down(pci_priv);
}
EXPORT_SYMBOL(cnss_pci_is_device_down);

void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_lock_bh(&pci_reg_window_lock);
}
EXPORT_SYMBOL(cnss_pci_lock_reg_window);

void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_unlock_bh(&pci_reg_window_lock);
}
EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
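
/* A minimal usage sketch (hypothetical caller) for the exported window
 * lock, which serializes remap window updates for clients that access the
 * windowed range directly. Note that cnss_pci_reg_read()/write() already
 * take this lock internally, so they must not be called with it held:
 *
 *	unsigned long flags;
 *
 *	cnss_pci_lock_reg_window(dev, &flags);
 *	// ... direct windowed BAR accesses by the client ...
 *	cnss_pci_unlock_reg_window(dev, &flags);
 *
 * The flags argument is currently unused, since the lock is taken with
 * spin_lock_bh() rather than an IRQ-saving variant.
 */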
int cnss_get_pci_slot(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv = NULL;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -EINVAL;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	return plat_priv->rc_num;
}
EXPORT_SYMBOL(cnss_get_pci_slot);
/**
 * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
 * @pci_priv: driver PCI bus context pointer
 *
 * Dump primary and secondary bootloader debug log data. For SBL, check the
 * log struct address and size for validity.
 *
 * Return: None
 */
static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
{
	enum mhi_ee_type ee;
	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
	u32 pbl_log_sram_start;
	u32 pbl_stage, sbl_log_start, sbl_log_size;
	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
	u32 sbl_log_def_start = SRAM_START;
	u32 sbl_log_def_end = SRAM_END;
	int i;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case QCA6490_DEVICE_ID:
		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case KIWI_DEVICE_ID:
		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case MANGO_DEVICE_ID:
		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case PEACH_DEVICE_ID:
		pbl_bootstrap_status_reg = PEACH_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = PEACH_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = PEACH_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = PEACH_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	default:
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
			  &pbl_bootstrap_status);
	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
		    pbl_stage, sbl_log_start, sbl_log_size);
	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
		    pbl_wlan_boot_cfg, pbl_bootstrap_status);

	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
		cnss_pr_dbg("Avoid Dumping PBL log data in Mission mode\n");
		return;
	}

	cnss_pr_dbg("Dumping PBL log data\n");
	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
		mem_addr = pbl_log_sram_start + i;
		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
			break;
		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
	}

	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
			sbl_log_max_size : sbl_log_size);
	if (sbl_log_start < sbl_log_def_start ||
	    sbl_log_start > sbl_log_def_end ||
	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
		cnss_pr_err("Invalid SBL log data\n");
		return;
	}

	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
		cnss_pr_dbg("Avoid Dumping SBL log data in Mission mode\n");
		return;
	}

	cnss_pr_dbg("Dumping SBL log data\n");
	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
		mem_addr = sbl_log_start + i;
		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
			break;
		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
	}
}
#ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
}
#else
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	u32 i, mem_addr;
	u32 *dump_ptr;

	plat_priv = pci_priv->plat_priv;

	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		return;

	if (!plat_priv->sram_dump) {
		cnss_pr_err("SRAM dump memory is not allocated\n");
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Dumping SRAM at 0x%lx\n", plat_priv->sram_dump);

	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
		mem_addr = SRAM_START + i;
		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
			cnss_pr_err("SRAM Dump failed at 0x%x\n", mem_addr);
			break;
		}
		/* Relinquish CPU after dumping 256KB chunks */
		if (!(i % CNSS_256KB_SIZE))
			cond_resched();
	}
}
#endif
static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	cnss_fatal_err("MHI power up returns timeout\n");

	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
	    cnss_get_dev_sol_value(plat_priv) > 0) {
		/* Wait for RDDM if RDDM cookie is set or device SOL GPIO is
		 * high. If RDDM times out, PBL/SBL error region may have been
		 * erased so no need to dump them either.
		 */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind) {
			mod_timer(&pci_priv->dev_rddm_timer,
				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
		}
	} else {
		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
		cnss_mhi_debug_reg_dump(pci_priv);
		cnss_pci_soc_scratch_reg_dump(pci_priv);
		/* Dump PBL/SBL error log if RDDM cookie is not set */
		cnss_pci_dump_bl_sram_mem(pci_priv);
		cnss_pci_dump_sram(pci_priv);
		return -ETIMEDOUT;
	}

	return 0;
}
static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		return "INIT";
	case CNSS_MHI_DEINIT:
		return "DEINIT";
	case CNSS_MHI_POWER_ON:
		return "POWER_ON";
	case CNSS_MHI_POWERING_OFF:
		return "POWERING_OFF";
	case CNSS_MHI_POWER_OFF:
		return "POWER_OFF";
	case CNSS_MHI_FORCE_POWER_OFF:
		return "FORCE_POWER_OFF";
	case CNSS_MHI_SUSPEND:
		return "SUSPEND";
	case CNSS_MHI_RESUME:
		return "RESUME";
	case CNSS_MHI_TRIGGER_RDDM:
		return "TRIGGER_RDDM";
	case CNSS_MHI_RDDM_DONE:
		return "RDDM_DONE";
	default:
		return "UNKNOWN";
	}
}
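
/* Driver-internal MHI state machine: a transition is only allowed from the
 * states checked below, e.g. DEINIT/POWER_ON require INIT set with POWER_ON
 * clear, and SUSPEND requires POWER_ON set with SUSPEND clear; RDDM_DONE is
 * always accepted. An invalid request asserts via CNSS_ASSERT(), except for
 * TRIGGER_RDDM requests, which are reported but tolerated.
 */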
static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
					enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_DEINIT:
	case CNSS_MHI_POWER_ON:
		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_SUSPEND:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RESUME:
		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RDDM_DONE:
		return 0;
	default:
		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
			    cnss_mhi_state_to_str(mhi_state), mhi_state);
	}

	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state,
		    pci_priv->mhi_state);

	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
		CNSS_ASSERT(0);

	return -EINVAL;
}
static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
{
	int ret;
	u32 read_val;

	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
		return -EOPNOTSUPP;

	if (cnss_pci_check_link_status(pci_priv))
		return -EINVAL;

	cnss_pr_err("Write GCC Spare with ACE55 Pattern\n");
	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
	cnss_pr_err("Read back GCC Spare: 0x%x, ret: %d\n", read_val, ret);
	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
				&read_val);
	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d\n", read_val, ret);

	return ret;
}

static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
{
	int ret;
	u32 read_val;
	u32 pbl_stage, sbl_log_start, sbl_log_size, pbl_wlan_boot_cfg;

	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
		return -EOPNOTSUPP;

	if (cnss_pci_check_link_status(pci_priv))
		return -EINVAL;

	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d\n",
		    read_val, ret);

	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
		    pbl_stage, sbl_log_start, sbl_log_size);
	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x\n", pbl_wlan_boot_cfg);

	return ret;
}
static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
				       enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_DEINIT:
		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_ON:
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWERING_OFF:
		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_FORCE_POWER_OFF:
		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_SUSPEND:
		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RESUME:
		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RDDM_DONE:
		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
	}
}
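
/* On kernels >= 5.15 the MHI core exposes mhi_pm_resume_force(), used here
 * so resume can proceed even when the MHI core's device state check would
 * otherwise reject it; older kernels fall back to plain mhi_pm_resume().
 */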
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
{
	return mhi_pm_resume_force(pci_priv->mhi_ctrl);
}
#else
static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
{
	return mhi_pm_resume(pci_priv->mhi_ctrl);
}
#endif
static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
				  enum cnss_mhi_state mhi_state)
{
	int ret = 0, retry = 0;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	if (mhi_state < 0) {
		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
		return -EINVAL;
	}

	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
	if (ret)
		goto out;

	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
		     cnss_mhi_state_to_str(mhi_state), mhi_state);

	switch (mhi_state) {
	case CNSS_MHI_INIT:
		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
		break;
	case CNSS_MHI_DEINIT:
		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
		ret = 0;
		break;
	case CNSS_MHI_POWER_ON:
		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
		/* Only set img_pre_alloc when power up succeeds */
		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
			cnss_pr_dbg("Notify MHI to use already allocated images\n");
			pci_priv->mhi_ctrl->img_pre_alloc = true;
		}
#endif
		break;
	case CNSS_MHI_POWER_OFF:
		mhi_power_down(pci_priv->mhi_ctrl, true);
		ret = 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		mhi_power_down(pci_priv->mhi_ctrl, false);
		ret = 0;
		break;
	case CNSS_MHI_SUSPEND:
retry_mhi_suspend:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last)
			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
		else
			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
			cnss_pr_dbg("Retry MHI suspend #%d\n", retry);
			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
			goto retry_mhi_suspend;
		}
		break;
	case CNSS_MHI_RESUME:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last) {
			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
			if (ret) {
				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
				break;
			}
			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
		} else {
			if (pci_priv->device_id == QCA6390_DEVICE_ID)
				ret = cnss_mhi_pm_force_resume(pci_priv);
			else
				ret = mhi_pm_resume(pci_priv->mhi_ctrl);
		}
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		cnss_rddm_trigger_debug(pci_priv);
		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
		if (ret) {
			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
			cnss_pr_dbg("Sending host reset req\n");
			ret = cnss_mhi_force_reset(pci_priv);
			cnss_rddm_trigger_check(pci_priv);
		}
		break;
	case CNSS_MHI_RDDM_DONE:
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
		ret = -EINVAL;
	}

	if (ret)
		goto out;

	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);

	return 0;

out:
	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
	return ret;
}
static int cnss_pci_config_msi_addr(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv;

	if (!pci_dev)
		return -ENODEV;

	if (!pci_dev->msix_enabled)
		return ret;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
				   "msix-match-addr",
				   &pci_priv->msix_addr);
	cnss_pr_dbg("MSI-X Match address is 0x%X\n",
		    pci_priv->msix_addr);

	return ret;
}

static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
{
	struct msi_desc *msi_desc;
	struct cnss_msi_config *msi_config;
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	msi_config = pci_priv->msi_config;

	if (pci_dev->msix_enabled) {
		pci_priv->msi_ep_base_data = msi_config->users[0].base_vector;
		cnss_pr_dbg("MSI-X base data is %d\n",
			    pci_priv->msi_ep_base_data);
		return 0;
	}

	msi_desc = irq_get_msi_desc(pci_dev->irq);
	if (!msi_desc) {
		cnss_pr_err("msi_desc is NULL!\n");
		return -EINVAL;
	}

	pci_priv->msi_ep_base_data = msi_desc->msg.data;
	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);

	return 0;
}
#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
#define PLC_PCIE_NAME_LEN		14

static struct cnss_plat_data *
cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
{
	int plat_env_count = cnss_get_plat_env_count();
	struct cnss_plat_data *plat_env;
	struct cnss_pci_data *pci_priv;
	int i = 0;

	if (!driver_ops) {
		cnss_pr_err("No cnss driver\n");
		return NULL;
	}

	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);
		if (!plat_env)
			continue;
		if (driver_ops->name && plat_env->pld_bus_ops_name) {
			/* driver_ops->name = PLD_PCIE_OPS_NAME
			 * #ifdef MULTI_IF_NAME
			 * #define PLD_PCIE_OPS_NAME "pld_pcie_" MULTI_IF_NAME
			 * #else
			 * #define PLD_PCIE_OPS_NAME "pld_pcie"
			 * #endif
			 */
			if (memcmp(driver_ops->name,
				   plat_env->pld_bus_ops_name,
				   PLC_PCIE_NAME_LEN) == 0)
				return plat_env;
		}
	}

	cnss_pr_vdbg("Invalid cnss driver name from ko %s\n", driver_ops->name);
	/* In the dual wlan card case, the pld_bus_ops_name from the dts and
	 * driver_ops->name from the ko should match; otherwise the wlan host
	 * driver doesn't know which plat_env it can use. If no match is
	 * found, fall back to the first available instance instead.
	 */
	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);
		if (!plat_env)
			continue;
		pci_priv = plat_env->bus_priv;
		if (!pci_priv) {
			cnss_pr_err("pci_priv is NULL\n");
			continue;
		}

		if (driver_ops == pci_priv->driver_ops)
			return plat_env;
	}
	/* No existing instance found, so return the first empty instance */
	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);
		if (!plat_env)
			continue;
		pci_priv = plat_env->bus_priv;
		if (!pci_priv) {
			cnss_pr_err("pci_priv is NULL\n");
			continue;
		}

		if (!pci_priv->driver_ops)
			return plat_env;
	}

	return NULL;
}
static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	u32 scratch = QCA6390_PCIE_SOC_PCIE_REG_PCIE_SCRATCH_2_SOC_PCIE_REG;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	/* In the single wlan chipset case, plat_priv->qrtr_node_id is always
	 * 0 and wlan fw uses the hardcoded 7 as the qrtr node id. In the
	 * dual Hastings case, the qrtr node id is read from the device tree
	 * into plat_priv->qrtr_node_id, which is always non-zero, and then
	 * stored to a pcie register; wlan fw reads the qrtr node id back
	 * from that register and overwrites the hardcoded one while
	 * initializing the ipc router. Without this, two Hastings would use
	 * the same qrtr node instance id, which would mess up qmi message
	 * exchange. Per the qrtr spec, every node should have a unique qrtr
	 * node id.
	 */
	if (plat_priv->device_id == QCA6390_DEVICE_ID &&
	    plat_priv->qrtr_node_id) {
		u32 val;

		cnss_pr_dbg("write 0x%x to SCRATCH REG\n",
			    plat_priv->qrtr_node_id);
		ret = cnss_pci_reg_write(pci_priv, scratch,
					 plat_priv->qrtr_node_id);
		if (ret) {
			cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
				    scratch, ret);
			goto out;
		}

		ret = cnss_pci_reg_read(pci_priv, scratch, &val);
		if (ret) {
			cnss_pr_err("Failed to read SCRATCH REG");
			goto out;
		}

		if (val != plat_priv->qrtr_node_id) {
			cnss_pr_err("qrtr node id write to register doesn't match with readout value");
			return -ERANGE;
		}
	}
out:
	return ret;
}
#else
static struct cnss_plat_data *
cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
{
	return cnss_bus_dev_to_plat_priv(NULL);
}

static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
{
	return 0;
}
#endif
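
/* MHI power-on flow below: prepare the controller (CNSS_MHI_INIT), scale
 * the power-on timeout for the build type, store the qrtr node id (with
 * retries) so dual-card firmware gets a unique id, then power up with the
 * boot debug timer running to periodically dump MHI/PBL/SBL state if boot
 * stalls. A -ETIMEDOUT power up still marks CNSS_MHI_POWER_ON so the
 * eventual power down can clean up the MHI core.
 */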
int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	unsigned int timeout = 0;
	int retry = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return 0;

	if (MHI_TIMEOUT_OVERWRITE_MS)
		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
	if (ret)
		return ret;

	timeout = pci_priv->mhi_ctrl->timeout_ms;
	/* For non-perf builds the timeout is 10 (default) * 6 seconds */
	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		pci_priv->mhi_ctrl->timeout_ms *= 6;
	else /* For perf builds the timeout is 10 (default) * 3 seconds */
		pci_priv->mhi_ctrl->timeout_ms *= 3;

retry:
	ret = cnss_pci_store_qrtr_node_id(pci_priv);
	if (ret) {
		if (retry++ < REG_RETRY_MAX_TIMES)
			goto retry;
		else
			return ret;
	}

	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
	mod_timer(&pci_priv->boot_debug_timer,
		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
	del_timer_sync(&pci_priv->boot_debug_timer);
	if (ret == 0)
		cnss_wlan_adsp_pc_enable(pci_priv, false);

	pci_priv->mhi_ctrl->timeout_ms = timeout;

	if (ret == -ETIMEDOUT) {
		/* Special case: if MHI power on returns -ETIMEDOUT, the
		 * controller needs to take care of the cleanup by calling
		 * MHI power down. Force-set the bit for the driver-internal
		 * MHI state so it can be handled properly later.
		 */
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
	} else if (!ret) {
		/* The kernel may allocate a dummy vector before request_irq
		 * and then allocate a real vector when request_irq is
		 * called. So get msi_data here again to avoid spurious
		 * interrupts, as msi_data will be configured to the srngs.
		 */
		if (cnss_pci_is_one_msi(pci_priv))
			ret = cnss_pci_config_msi_data(pci_priv);
	}

	return ret;
}
static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return;

	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
		cnss_pr_dbg("MHI is already powered off\n");
		return;
	}
	cnss_wlan_adsp_pc_enable(pci_priv, true);
	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);

	if (!pci_priv->pci_link_down_ind)
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
	else
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
}

static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return;

	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
		cnss_pr_dbg("MHI is already deinited\n");
		return;
	}

	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
}

static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
					bool set_vddd4blow, bool set_shutdown,
					bool do_force_wake)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int ret;
	u32 val;

	if (!plat_priv->set_wlaon_pwr_ctrl)
		return;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
	    pci_priv->pci_link_down_ind)
		return;

	if (do_force_wake)
		if (cnss_pci_force_wake_get(pci_priv))
			return;

	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
	if (ret) {
		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
	}

	cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
		    WLAON_QFPROM_PWR_CTRL_REG, val);

	if (set_vddd4blow)
		val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
	else
		val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;

	if (set_shutdown)
		val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
	else
		val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;

	ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
	if (ret) {
		cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
	}

	cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
		    WLAON_QFPROM_PWR_CTRL_REG);

	if (set_shutdown)
		usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
			     WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);

force_wake_put:
	if (do_force_wake)
		cnss_pci_force_wake_put(pci_priv);
}
static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
					 u64 *time_us)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	u32 low, high;
	u64 device_ticks;

	if (!plat_priv->device_freq_hz) {
		cnss_pr_err("Device time clock frequency is not valid\n");
		return -EINVAL;
	}

	switch (pci_priv->device_id) {
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low);
		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high);
		break;
	default:
		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
		break;
	}

	device_ticks = (u64)high << 32 | low;
	do_div(device_ticks, plat_priv->device_freq_hz / 100000);
	*time_us = device_ticks * 10;

	return 0;
}
  2234. static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
  2235. {
  2236. switch (pci_priv->device_id) {
  2237. case KIWI_DEVICE_ID:
  2238. case MANGO_DEVICE_ID:
  2239. case PEACH_DEVICE_ID:
  2240. return;
  2241. default:
  2242. break;
  2243. }
  2244. cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
  2245. TIME_SYNC_ENABLE);
  2246. }
  2247. static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
  2248. {
  2249. switch (pci_priv->device_id) {
  2250. case KIWI_DEVICE_ID:
  2251. case MANGO_DEVICE_ID:
  2252. case PEACH_DEVICE_ID:
  2253. return;
  2254. default:
  2255. break;
  2256. }
  2257. cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
  2258. TIME_SYNC_CLEAR);
  2259. }
  2260. static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv,
  2261. u32 low, u32 high)
  2262. {
  2263. u32 time_reg_low;
  2264. u32 time_reg_high;
  2265. switch (pci_priv->device_id) {
  2266. case KIWI_DEVICE_ID:
  2267. case MANGO_DEVICE_ID:
  2268. case PEACH_DEVICE_ID:
  2269. /* Use the next two shadow registers after host's usage */
  2270. time_reg_low = PCIE_SHADOW_REG_VALUE_0 +
  2271. (pci_priv->plat_priv->num_shadow_regs_v3 *
  2272. SHADOW_REG_LEN_BYTES);
  2273. time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES;
  2274. break;
  2275. default:
  2276. time_reg_low = PCIE_SHADOW_REG_VALUE_34;
  2277. time_reg_high = PCIE_SHADOW_REG_VALUE_35;
  2278. break;
  2279. }
  2280. cnss_pci_reg_write(pci_priv, time_reg_low, low);
  2281. cnss_pci_reg_write(pci_priv, time_reg_high, high);
  2282. cnss_pci_reg_read(pci_priv, time_reg_low, &low);
  2283. cnss_pci_reg_read(pci_priv, time_reg_high, &high);
  2284. cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
  2285. time_reg_low, low, time_reg_high, high);
  2286. }
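
/* Compute the host-minus-device time offset and program it into the
 * shadow registers chosen above. L1 entry is blocked and force wake is
 * held for the whole sequence, and the two timestamps are captured
 * back-to-back under time_sync_lock to minimise sampling skew.
 */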
static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &pci_priv->pci_dev->dev;
	unsigned long flags = 0;
	u64 host_time_us, device_time_us, offset;
	u32 low, high;
	int ret;

	ret = cnss_pci_prevent_l1(dev);
	if (ret)
		goto out;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		goto allow_l1;

	spin_lock_irqsave(&time_sync_lock, flags);
	cnss_pci_clear_time_sync_counter(pci_priv);
	cnss_pci_enable_time_sync_counter(pci_priv);
	host_time_us = cnss_get_host_timestamp(plat_priv);
	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
	cnss_pci_clear_time_sync_counter(pci_priv);
	spin_unlock_irqrestore(&time_sync_lock, flags);
	if (ret)
		goto force_wake_put;

	if (host_time_us < device_time_us) {
		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
			    host_time_us, device_time_us);
		ret = -EINVAL;
		goto force_wake_put;
	}

	offset = host_time_us - device_time_us;
	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
		    host_time_us, device_time_us, offset);

	low = offset & 0xFFFFFFFF;
	high = offset >> 32;
	cnss_pci_time_sync_reg_update(pci_priv, low, high);

force_wake_put:
	cnss_pci_force_wake_put(pci_priv);
allow_l1:
	cnss_pci_allow_l1(dev);
out:
	return ret;
}
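
/* Periodic worker for time sync: refreshes the offset and re-arms
 * itself every time_sync_period ms. Note it does not reschedule when
 * the runtime PM get fails, so time sync stays paused until it is
 * restarted explicitly.
 */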
static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
{
	struct cnss_pci_data *pci_priv =
		container_of(work, struct cnss_pci_data, time_sync_work.work);
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int time_sync_period_ms =
		plat_priv->ctrl_params.time_sync_period;

	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
		cnss_pr_dbg("Time sync is disabled\n");
		return;
	}

	if (!time_sync_period_ms) {
		cnss_pr_dbg("Skip time sync as time period is 0\n");
		return;
	}

	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
		return;

	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
		goto runtime_pm_put;

	mutex_lock(&pci_priv->bus_lock);
	cnss_pci_update_timestamp(pci_priv);
	mutex_unlock(&pci_priv->bus_lock);
	schedule_delayed_work(&pci_priv->time_sync_work,
			      msecs_to_jiffies(time_sync_period_ms));

runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
}

static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!plat_priv->device_freq_hz) {
		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
		return -EINVAL;
	}

	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);

	return 0;
}

static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	cancel_delayed_work_sync(&pci_priv->time_sync_work);
}

int cnss_pci_set_therm_cdev_state(struct cnss_pci_data *pci_priv,
				  unsigned long thermal_state,
				  int tcdev_id)
{
	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL!\n");
		return -ENODEV;
	}

	if (!pci_priv->driver_ops || !pci_priv->driver_ops->set_therm_cdev_state) {
		cnss_pr_err("driver_ops or set_therm_cdev_state is NULL\n");
		return -EINVAL;
	}

	return pci_priv->driver_ops->set_therm_cdev_state(pci_priv->pci_dev,
							  thermal_state,
							  tcdev_id);
}

int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
				     unsigned int time_sync_period)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	cnss_pci_stop_time_sync_update(pci_priv);
	plat_priv->ctrl_params.time_sync_period = time_sync_period;
	cnss_pci_start_time_sync_update(pci_priv);
	cnss_pr_dbg("WLAN time sync period %u ms\n",
		    plat_priv->ctrl_params.time_sync_period);

	return 0;
}
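
/* Dispatch to the registered host driver based on driver_state:
 * reinit() when recovering an already probed driver, probe() on first
 * load, idle_restart() for idle restart, otherwise just signal
 * completion. power_up_complete is signalled on every path so waiters
 * in the (un)register flow are released.
 */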
int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_err("Reboot is in progress, skip driver probe\n");
		return -EINVAL;
	}

	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		cnss_pr_dbg("Skip driver probe\n");
		goto out;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		ret = -EINVAL;
		goto out;
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
						   pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to reinit host driver, err = %d\n",
				    ret);
			goto out;
		}
		complete(&plat_priv->recovery_complete);
	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
						  pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to probe host driver, err = %d\n",
				    ret);
			goto out;
		}
		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
		cnss_pci_free_blob_mem(pci_priv);
		complete_all(&plat_priv->power_up_complete);
	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
							 pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
				    ret);
			plat_priv->power_up_error = ret;
			complete_all(&plat_priv->power_up_complete);
			goto out;
		}
		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
		complete_all(&plat_priv->power_up_complete);
	} else {
		complete(&plat_priv->power_up_complete);
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		__pm_relax(plat_priv->recovery_ws);
	}

	cnss_pci_start_time_sync_update(pci_priv);

	return 0;

out:
	return ret;
}

int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	int ret;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		cnss_pr_dbg("Skip driver remove\n");
		return 0;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		return -EINVAL;
	}

	cnss_pci_stop_time_sync_update(pci_priv);

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
		pci_priv->driver_ops->remove(pci_priv->pci_dev);
		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
	} else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
		if (ret == -EAGAIN) {
			clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
				  &plat_priv->driver_state);
			return ret;
		}
	}

	plat_priv->get_info_cb_ctx = NULL;
	plat_priv->get_info_cb = NULL;

	return 0;
}

int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
				      int modem_current_status)
{
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -ENODEV;

	driver_ops = pci_priv->driver_ops;
	if (!driver_ops || !driver_ops->modem_status)
		return -EINVAL;

	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);

	return 0;
}

int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
			   enum cnss_driver_status status)
{
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -ENODEV;

	driver_ops = pci_priv->driver_ops;
	if (!driver_ops || !driver_ops->update_status)
		return -EINVAL;

	cnss_pr_dbg("Update driver status: %d\n", status);

	driver_ops->update_status(pci_priv->pci_dev, status);

	return 0;
}
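
/* Walk a table of misc debug registers, reading each entry that
 * matches this device's mask (or writing it when .wr is set). Skipped
 * in atomic context since register access may sleep; if force wake
 * cannot be taken, the dump proceeds only when the device has already
 * signalled RDDM, and the matching put is then skipped.
 */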
static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
				   struct cnss_misc_reg *misc_reg,
				   u32 misc_reg_size,
				   char *reg_name)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	bool do_force_wake_put = true;
	int i;

	if (!misc_reg)
		return;

	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (cnss_pci_force_wake_get(pci_priv)) {
		/* Continue to dump when device has entered RDDM already */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
			return;
		do_force_wake_put = false;
	}

	cnss_pr_dbg("Start to dump %s registers\n", reg_name);

	for (i = 0; i < misc_reg_size; i++) {
		if (!test_bit(pci_priv->misc_reg_dev_mask,
			      &misc_reg[i].dev_mask))
			continue;

		if (misc_reg[i].wr) {
			if (misc_reg[i].offset ==
			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
			    i >= 1)
				misc_reg[i].val =
					QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
					misc_reg[i - 1].val;
			if (cnss_pci_reg_write(pci_priv,
					       misc_reg[i].offset,
					       misc_reg[i].val))
				goto force_wake_put;
			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
				     misc_reg[i].val,
				     misc_reg[i].offset);
		} else {
			if (cnss_pci_reg_read(pci_priv,
					      misc_reg[i].offset,
					      &misc_reg[i].val))
				goto force_wake_put;
		}
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
}

static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
{
	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
			       WCSS_REG_SIZE, "wcss");
	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
			       PCIE_REG_SIZE, "pcie");
	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
			       WLAON_REG_SIZE, "wlaon");
	cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
			       SYSPM_REG_SIZE, "syspm");
}

static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
{
	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
	u32 reg_offset;
	bool do_force_wake_put = true;

	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (!pci_priv->debug_reg) {
		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
						   sizeof(*pci_priv->debug_reg)
						   * array_size, GFP_KERNEL);
		if (!pci_priv->debug_reg)
			return;
	}

	if (cnss_pci_force_wake_get(pci_priv))
		do_force_wake_put = false;

	cnss_pr_dbg("Start to dump shadow registers\n");

	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
		pci_priv->debug_reg[j].offset = reg_offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset,
				      &pci_priv->debug_reg[j].val))
			goto force_wake_put;
	}

	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
		pci_priv->debug_reg[j].offset = reg_offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset,
				      &pci_priv->debug_reg[j].val))
			goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
}
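
/* QCA6174 power-up: power on the device, bring up the PCI link, then
 * run driver probe, unwinding in reverse order on failure. There is no
 * MHI handling here, unlike the QCA6290-class paths below.
 */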
static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	ret = cnss_power_on_device(plat_priv, false);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		goto power_off;
	}

	ret = cnss_pci_call_driver_probe(pci_priv);
	if (ret)
		goto suspend_link;

	return 0;

suspend_link:
	cnss_suspend_pci_link(pci_priv);
power_off:
	cnss_power_off_device(plat_priv);
out:
	return ret;
}

static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	cnss_pci_pm_runtime_resume(pci_priv);

	ret = cnss_pci_call_driver_remove(pci_priv);
	if (ret == -EAGAIN)
		goto out;

	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
				   CNSS_BUS_WIDTH_NONE);
	cnss_pci_set_monitor_wake_intr(pci_priv, false);
	cnss_pci_set_auto_suspended(pci_priv, 0);

	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	cnss_power_off_device(plat_priv);

	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);

out:
	return ret;
}

static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
}

static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_ramdump_info *ramdump_info;

	ramdump_info = &plat_priv->ramdump_info;
	if (!ramdump_info->ramdump_size)
		return -EINVAL;

	return cnss_do_ramdump(plat_priv);
}

static void cnss_get_driver_mode_update_fw_name(struct cnss_plat_data *plat_priv)
{
	struct cnss_pci_data *pci_priv;
	struct cnss_wlan_driver *driver_ops;

	pci_priv = plat_priv->bus_priv;
	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->get_driver_mode) {
		plat_priv->driver_mode = driver_ops->get_driver_mode();
		cnss_pci_update_fw_name(pci_priv);
		cnss_pr_dbg("New driver mode is %d\n", plat_priv->driver_mode);
	}
}
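
/* QCA6290-class power-up. Stale crash state (valid ramdump data) is
 * torn down first. A PCI link resume failure with -EAGAIN is retried
 * up to POWER_ON_RETRY_MAX_TIMES with an increasing delay; on the
 * final retry BT_EN is also driven low, presumably to release a rail
 * shared with Bluetooth before the last attempt.
 */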
static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int timeout;
	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;

	if (plat_priv->ramdump_info_v2.dump_data_valid) {
		cnss_pci_clear_dump_info(pci_priv);
		cnss_pci_power_off_mhi(pci_priv);
		cnss_suspend_pci_link(pci_priv);
		cnss_pci_deinit_mhi(pci_priv);
		cnss_power_off_device(plat_priv);
	}

	/* Clear QMI send usage count during every power up */
	pci_priv->qmi_send_usage_count = 0;

	plat_priv->power_up_error = 0;

	cnss_get_driver_mode_update_fw_name(plat_priv);
retry:
	ret = cnss_power_on_device(plat_priv, false);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
			    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
		if (test_bit(IGNORE_PCI_LINK_FAILURE,
			     &plat_priv->ctrl_params.quirks)) {
			cnss_pr_dbg("Ignore PCI link resume failure\n");
			ret = 0;
			goto out;
		}
		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
			cnss_power_off_device(plat_priv);
			/* Force toggle BT_EN GPIO low */
			if (retry == POWER_ON_RETRY_MAX_TIMES) {
				cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
					    retry, bt_en_gpio);
				if (bt_en_gpio >= 0)
					gpio_direction_output(bt_en_gpio, 0);
				cnss_pr_dbg("BT_EN GPIO val: %d\n",
					    gpio_get_value(bt_en_gpio));
			}
			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
				    cnss_get_input_gpio_value(plat_priv,
							      sw_ctrl_gpio));
			msleep(POWER_ON_RETRY_DELAY_MS * retry);
			goto retry;
		}
		/* Assert when it reaches maximum retries */
		CNSS_ASSERT(0);
		goto power_off;
	}

	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);

	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);

	ret = cnss_pci_start_mhi(pci_priv);
	if (ret) {
		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind && timeout) {
			/* Start recovery directly for MHI start failures */
			cnss_schedule_recovery(&pci_priv->pci_dev->dev,
					       CNSS_REASON_DEFAULT);
		}
		return 0;
	}

	if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		return 0;
	}

	cnss_set_pin_connect_status(plat_priv);

	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
		ret = cnss_pci_call_driver_probe(pci_priv);
		if (ret)
			goto stop_mhi;
	} else if (timeout) {
		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
			timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
		else
			timeout += WLAN_MISSION_MODE_TIMEOUT;
		mod_timer(&plat_priv->fw_boot_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}

	return 0;

stop_mhi:
	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
	cnss_pci_power_off_mhi(pci_priv);
	cnss_suspend_pci_link(pci_priv);
	cnss_pci_deinit_mhi(pci_priv);
power_off:
	cnss_power_off_device(plat_priv);
out:
	return ret;
}
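
/* QCA6290-class shutdown. If a device error was signalled during a
 * load/unload/idle transition, RDDM dump data is collected first. The
 * MHI/link/device power-off is skipped while ramdump data is still
 * valid, because powering off MHI would free the FBC image backing it.
 */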
static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	bool do_force_wake = true;

	cnss_pci_pm_runtime_resume(pci_priv);

	ret = cnss_pci_call_driver_remove(pci_priv);
	if (ret == -EAGAIN)
		goto out;

	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
				   CNSS_BUS_WIDTH_NONE);
	cnss_pci_set_monitor_wake_intr(pci_priv, false);
	cnss_pci_set_auto_suspended(pci_priv, 0);

	if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
	     test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
	     test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
	     test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
	     test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
		del_timer(&pci_priv->dev_rddm_timer);
		cnss_pci_collect_dump_info(pci_priv, false);
		if (!plat_priv->recovery_enabled)
			CNSS_ASSERT(0);
	}

	if (!cnss_is_device_powered_on(plat_priv)) {
		cnss_pr_dbg("Device is already powered off, ignore\n");
		goto skip_power_off;
	}

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		do_force_wake = false;
	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);

	/* FBC image will be freed after powering off MHI, so skip
	 * if RAM dump data is still valid.
	 */
	if (plat_priv->ramdump_info_v2.dump_data_valid)
		goto skip_power_off;

	cnss_pci_power_off_mhi(pci_priv);
	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
	cnss_pci_deinit_mhi(pci_priv);
	cnss_power_off_device(plat_priv);

skip_power_off:
	pci_priv->remap_window = 0;
	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
		pci_priv->pci_link_down_ind = false;
	}
	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
	memset(&print_optimize, 0, sizeof(print_optimize));

out:
	return ret;
}

static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
		    plat_priv->driver_state);

	cnss_pci_collect_dump_info(pci_priv, true);
	clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
}

static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
	struct cnss_dump_data *dump_data = &info_v2->dump_data;
	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
	int ret = 0;

	if (!info_v2->dump_data_valid || !dump_seg ||
	    dump_data->nentries == 0)
		return 0;

	ret = cnss_do_elf_ramdump(plat_priv);

	cnss_pci_clear_dump_info(pci_priv);
	cnss_pci_power_off_mhi(pci_priv);
	cnss_suspend_pci_link(pci_priv);
	cnss_pci_deinit_mhi(pci_priv);
	cnss_power_off_device(plat_priv);

	return ret;
}
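
/* Bus-level entry points: fan out to the chip-family handlers above
 * based on the enumerated PCI device ID.
 */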
int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		ret = cnss_qca6174_powerup(pci_priv);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		ret = cnss_qca6290_powerup(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown device_id found: 0x%x\n",
			    pci_priv->device_id);
		ret = -ENODEV;
	}

	return ret;
}

int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		ret = cnss_qca6174_shutdown(pci_priv);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		ret = cnss_qca6290_shutdown(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown device_id found: 0x%x\n",
			    pci_priv->device_id);
		ret = -ENODEV;
	}

	return ret;
}

int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		cnss_qca6174_crash_shutdown(pci_priv);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		cnss_qca6290_crash_shutdown(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown device_id found: 0x%x\n",
			    pci_priv->device_id);
		ret = -ENODEV;
	}

	return ret;
}

int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		ret = cnss_qca6174_ramdump(pci_priv);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		ret = cnss_qca6290_ramdump(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown device_id found: 0x%x\n",
			    pci_priv->device_id);
		ret = -ENODEV;
	}

	return ret;
}

int cnss_pci_is_drv_connected(struct device *dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));

	if (!pci_priv)
		return -ENODEV;

	return pci_priv->drv_connected_last;
}
EXPORT_SYMBOL(cnss_pci_is_drv_connected);
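
/* Deferred registration worker used when cold boot calibration (CBC)
 * is enabled: it reschedules itself while the file system is not ready,
 * posts a CNSS_CAL_TIMEOUT event if calibration never completes, and
 * finally posts the REGISTER_DRIVER event.
 */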
static void cnss_wlan_reg_driver_work(struct work_struct *work)
{
	struct cnss_plat_data *plat_priv =
		container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
	struct cnss_cal_info *cal_info;
	unsigned int timeout;

	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
		return;

	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
		goto reg_driver;
	} else {
		if (plat_priv->charger_mode) {
			cnss_pr_err("Ignore calibration timeout in charger mode\n");
			return;
		}
		if (!test_bit(CNSS_IN_COLD_BOOT_CAL,
			      &plat_priv->driver_state)) {
			timeout = cnss_get_timeout(plat_priv,
						   CNSS_TIMEOUT_CALIBRATION);
			cnss_pr_dbg("File system not ready to start calibration. Wait for %ds..\n",
				    timeout / 1000);
			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
					      msecs_to_jiffies(timeout));
			return;
		}
		del_timer(&plat_priv->fw_boot_timer);
		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) &&
		    !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
			cnss_pr_err("Timeout waiting for calibration to complete\n");
			CNSS_ASSERT(0);
		}
		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
		if (!cal_info)
			return;
		cal_info->cal_status = CNSS_CAL_TIMEOUT;
		cnss_driver_event_post(plat_priv,
				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
				       0, cal_info);
	}
reg_driver:
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
		return;
	}
	reinit_completion(&plat_priv->power_up_complete);
	cnss_driver_event_post(plat_priv,
			       CNSS_DRIVER_EVENT_REGISTER_DRIVER,
			       CNSS_EVENT_SYNC_UNKILLABLE,
			       pci_priv->driver_ops);
}
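
/**
 * cnss_wlan_register_driver() - Register a WLAN host driver with cnss2
 * @driver_ops: host driver callbacks and PCI ID table
 *
 * Validates that the platform is ready and that the enumerated device
 * and chip version match what the host driver was built for, then
 * either posts a synchronous REGISTER_DRIVER event or defers
 * registration until cold boot calibration completes.
 *
 * Return: 0 on success or deferral, negative errno otherwise.
 */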
int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	struct cnss_pci_data *pci_priv;
	const struct pci_device_id *id_table = driver_ops->id_table;
	unsigned int timeout;

	if (!cnss_check_driver_loading_allowed()) {
		cnss_pr_info("No cnss2 dtsi entry present\n");
		return -ENODEV;
	}

	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
	if (!plat_priv) {
		cnss_pr_buf("plat_priv is not ready for register driver\n");
		return -EAGAIN;
	}

	pci_priv = plat_priv->bus_priv;
	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
		while (id_table && id_table->device) {
			if (plat_priv->device_id == id_table->device) {
				if (plat_priv->device_id == KIWI_DEVICE_ID &&
				    driver_ops->chip_version != 2) {
					cnss_pr_err("WLAN HW disabled. kiwi_v2 only supported\n");
					return -ENODEV;
				}
				cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n",
					     id_table->device);
				plat_priv->driver_ops = driver_ops;
				return 0;
			}
			id_table++;
		}
		return -ENODEV;
	}

	if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
		cnss_pr_info("pci probe not yet done for register driver\n");
		return -EAGAIN;
	}

	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
		cnss_pr_err("Driver has already registered\n");
		return -EEXIST;
	}

	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
		return -EINVAL;
	}

	if (!id_table || !pci_dev_present(id_table)) {
		/* id_table pointer will move from pci_dev_present(),
		 * so check again using local pointer.
		 */
		id_table = driver_ops->id_table;
		while (id_table && id_table->vendor) {
			cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
				     id_table->device);
			id_table++;
		}
		cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
			    pci_priv->device_id);
		return -ENODEV;
	}

	if (driver_ops->chip_version != CNSS_CHIP_VER_ANY &&
	    driver_ops->chip_version != plat_priv->device_version.major_version) {
		cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n",
			    driver_ops->chip_version,
			    plat_priv->device_version.major_version);
		return -ENODEV;
	}

	cnss_get_driver_mode_update_fw_name(plat_priv);
	set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state);

	if (!plat_priv->cbc_enabled ||
	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
		goto register_driver;

	pci_priv->driver_ops = driver_ops;
	/* If Cold Boot Calibration is enabled, it is the first step in the
	 * init sequence. CBC is done on the file-system-ready trigger.
	 * Qcacld will be loaded from vendor_modprobe.sh at early boot and
	 * must be deferred until CBC is complete.
	 */
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
	INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
			  cnss_wlan_reg_driver_work);
	schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
			      msecs_to_jiffies(timeout));
	cnss_pr_info("WLAN register driver deferred for Calibration\n");
	return 0;

register_driver:
	reinit_completion(&plat_priv->power_up_complete);
	ret = cnss_driver_event_post(plat_priv,
				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
				     CNSS_EVENT_SYNC_UNKILLABLE,
				     driver_ops);

	return ret;
}
EXPORT_SYMBOL(cnss_wlan_register_driver);
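
/**
 * cnss_wlan_unregister_driver() - Unregister a WLAN host driver
 * @driver_ops: host driver previously registered with cnss2
 *
 * Waits (asserting on timeout) for any in-flight power up and recovery
 * to finish before posting the synchronous UNREGISTER_DRIVER event.
 */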
void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
{
	struct cnss_plat_data *plat_priv;
	int ret = 0;
	unsigned int timeout;

	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return;
	}

	mutex_lock(&plat_priv->driver_ops_lock);

	if (plat_priv->device_id == QCA6174_DEVICE_ID)
		goto skip_wait_power_up;

	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
					  msecs_to_jiffies(timeout));
	if (!ret) {
		cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
			    timeout);
		CNSS_ASSERT(0);
	}

skip_wait_power_up:
	if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		goto skip_wait_recovery;

	reinit_completion(&plat_priv->recovery_complete);
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
	ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
					  msecs_to_jiffies(timeout));
	if (!ret) {
		cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
			    timeout);
		CNSS_ASSERT(0);
	}

skip_wait_recovery:
	cnss_driver_event_post(plat_priv,
			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);

	mutex_unlock(&plat_priv->driver_ops_lock);
}
EXPORT_SYMBOL(cnss_wlan_unregister_driver);

int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
				  void *data)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
		return -EINVAL;
	}

	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
	pci_priv->driver_ops = data;
	ret = cnss_pci_dev_powerup(pci_priv);
	if (ret) {
		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
		pci_priv->driver_ops = NULL;
	} else {
		set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
	}

	return ret;
}

int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	cnss_pci_dev_shutdown(pci_priv);
	pci_priv->driver_ops = NULL;
	clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);

	return 0;
}

static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	pm_message_t state = { .event = PM_EVENT_SUSPEND };

	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
	    driver_ops && driver_ops->suspend) {
		ret = driver_ops->suspend(pci_dev, state);
		if (ret) {
			cnss_pr_err("Failed to suspend host driver, err = %d\n",
				    ret);
			ret = -EAGAIN;
		}
	}

	return ret;
}

static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
	    driver_ops && driver_ops->resume) {
		ret = driver_ops->resume(pci_dev);
		if (ret)
			cnss_pr_err("Failed to resume host driver, err = %d\n",
				    ret);
	}

	return ret;
}
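
/* Bus suspend/resume helpers. When DRV was negotiated with firmware
 * (drv_connected_last is set), PCI config space save/restore, device
 * disable/enable and the D3hot transition are all skipped; only the
 * link state and MHI state are changed.
 */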
int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN)
		goto out;

	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
		ret = -EAGAIN;
		goto out;
	}

	if (pci_priv->drv_connected_last)
		goto skip_disable_pci;

	pci_clear_master(pci_dev);
	cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
	pci_disable_device(pci_dev);

	ret = pci_set_power_state(pci_dev, PCI_D3hot);
	if (ret)
		cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);

skip_disable_pci:
	if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
		ret = -EAGAIN;
		goto resume_mhi;
	}
	pci_priv->pci_link_state = PCI_LINK_DOWN;

	return 0;

resume_mhi:
	if (!pci_is_enabled(pci_dev))
		if (pci_enable_device(pci_dev))
			cnss_pr_err("Failed to enable PCI device\n");
	if (pci_priv->saved_state)
		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
	pci_set_master(pci_dev);
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
out:
	return ret;
}

int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;

	if (pci_priv->pci_link_state == PCI_LINK_UP)
		goto out;

	if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
		cnss_fatal_err("Failed to resume PCI link from suspend\n");
		cnss_pci_link_down(&pci_dev->dev);
		ret = -EAGAIN;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;

	if (pci_priv->drv_connected_last)
		goto skip_enable_pci;

	ret = pci_enable_device(pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n",
			    ret);
		goto out;
	}

	if (pci_priv->saved_state)
		cnss_set_pci_config_space(pci_priv,
					  RESTORE_PCI_CONFIG_SPACE);
	pci_set_master(pci_dev);

skip_enable_pci:
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
out:
	return ret;
}
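
/* System PM callbacks. Suspend returns -EAGAIN, aborting that system
 * suspend attempt, when firmware does not support non-DRV suspend.
 */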
static int cnss_pci_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		goto out;

	if (!cnss_is_device_powered_on(plat_priv))
		goto out;

	/* If only PCIe enumeration has finished, no MHI state bit is set
	 * yet, so test_bit() cannot be used to check for the INIT state;
	 * compare the whole mhi_state value instead.
	 */
	if (pci_priv->mhi_state == CNSS_MHI_INIT) {
		bool suspend = cnss_should_suspend_pwroff(pci_dev);

		/* Do PCI link suspend and power off in the LPM case
		 * if the chipset didn't do that after PCIe enumeration.
		 */
		if (!suspend) {
			ret = cnss_suspend_pci_link(pci_priv);
			if (ret)
				cnss_pr_err("Failed to suspend PCI link, err = %d\n",
					    ret);
			cnss_power_off_device(plat_priv);
			goto out;
		}
	}

	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
	    pci_priv->drv_supported) {
		pci_priv->drv_connected_last =
			cnss_pci_get_drv_connected(pci_priv);
		if (!pci_priv->drv_connected_last) {
			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
			ret = -EAGAIN;
			goto out;
		}
	}

	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);

	ret = cnss_pci_suspend_driver(pci_priv);
	if (ret)
		goto clear_flag;

	if (!pci_priv->disable_pc) {
		mutex_lock(&pci_priv->bus_lock);
		ret = cnss_pci_suspend_bus(pci_priv);
		mutex_unlock(&pci_priv->bus_lock);
		if (ret)
			goto resume_driver;
	}

	cnss_pci_set_monitor_wake_intr(pci_priv, false);

	return 0;

resume_driver:
	cnss_pci_resume_driver(pci_priv);
clear_flag:
	pci_priv->drv_connected_last = 0;
	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
out:
	return ret;
}

static int cnss_pci_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		goto out;

	if (pci_priv->pci_link_down_ind)
		goto out;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		goto out;

	if (!pci_priv->disable_pc) {
		ret = cnss_pci_resume_bus(pci_priv);
		if (ret)
			goto out;
	}

	ret = cnss_pci_resume_driver(pci_priv);

	pci_priv->drv_connected_last = 0;
	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);

out:
	return ret;
}

static int cnss_pci_suspend_noirq(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_wlan_driver *driver_ops;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		goto out;

	driver_ops = pci_priv->driver_ops;
	plat_priv = pci_priv->plat_priv;
	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
	    driver_ops && driver_ops->suspend_noirq)
		ret = driver_ops->suspend_noirq(pci_dev);

	if (pci_priv->disable_pc && !pci_dev->state_saved &&
	    !pci_priv->plat_priv->use_pm_domain)
		pci_save_state(pci_dev);

out:
	return ret;
}

static int cnss_pci_resume_noirq(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_wlan_driver *driver_ops;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		goto out;

	plat_priv = pci_priv->plat_priv;
	driver_ops = pci_priv->driver_ops;
	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
	    driver_ops && driver_ops->resume_noirq &&
	    !pci_priv->pci_link_down_ind)
		ret = driver_ops->resume_noirq(pci_dev);

out:
	return ret;
}

static int cnss_pci_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -EAGAIN;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -EAGAIN;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		return -EAGAIN;

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress!\n");
		return -EAGAIN;
	}

	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
	    pci_priv->drv_supported) {
		pci_priv->drv_connected_last =
			cnss_pci_get_drv_connected(pci_priv);
		if (!pci_priv->drv_connected_last) {
			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
			return -EAGAIN;
		}
	}

	cnss_pr_vdbg("Runtime suspend start\n");

	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->runtime_ops &&
	    driver_ops->runtime_ops->runtime_suspend)
		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
	else
		ret = cnss_auto_suspend(dev);

	if (ret)
		pci_priv->drv_connected_last = 0;

	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);

	return ret;
}

static int cnss_pci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -EAGAIN;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		return -EAGAIN;

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress!\n");
		return -EAGAIN;
	}

	cnss_pr_vdbg("Runtime resume start\n");

	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->runtime_ops &&
	    driver_ops->runtime_ops->runtime_resume)
		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
	else
		ret = cnss_auto_resume(dev);

	if (!ret)
		pci_priv->drv_connected_last = 0;

	cnss_pr_vdbg("Runtime resume status: %d\n", ret);

	return ret;
}

static int cnss_pci_runtime_idle(struct device *dev)
{
	cnss_pr_vdbg("Runtime idle\n");

	pm_request_autosuspend(dev);

	return -EBUSY;
}
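
/**
 * cnss_wlan_pm_control() - Vote against or for PCIe power collapse
 * @dev: PCI device
 * @vote: true to disable PCIe power collapse, false to re-enable it
 *
 * Return: 0 on success, negative errno otherwise.
 */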
int cnss_wlan_pm_control(struct device *dev, bool vote)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	ret = cnss_pci_disable_pc(pci_priv, vote);
	if (ret)
		return ret;

	pci_priv->disable_pc = vote;
	cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");

	return 0;
}
EXPORT_SYMBOL(cnss_wlan_pm_control);

static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
					   enum cnss_rtpm_id id)
{
	if (id >= RTPM_ID_MAX)
		return;

	atomic_inc(&pci_priv->pm_stats.runtime_get);
	atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
	pci_priv->pm_stats.runtime_get_timestamp_id[id] =
		cnss_get_host_timestamp(pci_priv->plat_priv);
}

static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
					   enum cnss_rtpm_id id)
{
	if (id >= RTPM_ID_MAX)
		return;

	atomic_inc(&pci_priv->pm_stats.runtime_put);
	atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
	pci_priv->pm_stats.runtime_put_timestamp_id[id] =
		cnss_get_host_timestamp(pci_priv->plat_priv);
}

void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
{
	struct device *dev;

	if (!pci_priv)
		return;

	dev = &pci_priv->pci_dev->dev;

	cnss_pr_dbg("Runtime PM usage count: %d\n",
		    atomic_read(&dev->power.usage_count));
}
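
/* Thin wrappers around the runtime PM API that record per-caller-ID
 * get/put statistics and log which caller requested a resume while the
 * device was suspending or suspended.
 */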
int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
{
	struct device *dev;
	enum rpm_status status;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	status = dev->power.runtime_status;
	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
			     (void *)_RET_IP_);

	return pm_request_resume(dev);
}

int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
{
	struct device *dev;
	enum rpm_status status;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	status = dev->power.runtime_status;
	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
			     (void *)_RET_IP_);

	return pm_runtime_resume(dev);
}

int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
			    enum cnss_rtpm_id id)
{
	struct device *dev;
	enum rpm_status status;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	status = dev->power.runtime_status;
	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
			     (void *)_RET_IP_);

	cnss_pci_pm_runtime_get_record(pci_priv, id);

	return pm_runtime_get(dev);
}

int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
				 enum cnss_rtpm_id id)
{
	struct device *dev;
	enum rpm_status status;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	status = dev->power.runtime_status;
	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
			     (void *)_RET_IP_);

	cnss_pci_pm_runtime_get_record(pci_priv, id);

	return pm_runtime_get_sync(dev);
}

void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
				      enum cnss_rtpm_id id)
{
	if (!pci_priv)
		return;

	cnss_pci_pm_runtime_get_record(pci_priv, id);
	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
}

int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
					enum cnss_rtpm_id id)
{
	struct device *dev;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	if (atomic_read(&dev->power.usage_count) == 0) {
		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
		return -EINVAL;
	}

	cnss_pci_pm_runtime_put_record(pci_priv, id);

	return pm_runtime_put_autosuspend(dev);
}

void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
				    enum cnss_rtpm_id id)
{
	struct device *dev;

	if (!pci_priv)
		return;

	dev = &pci_priv->pci_dev->dev;

	if (atomic_read(&dev->power.usage_count) == 0) {
		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
		return;
	}

	cnss_pci_pm_runtime_put_record(pci_priv, id);
	pm_runtime_put_noidle(dev);
}

void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
{
	if (!pci_priv)
		return;

	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
}
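
/* Default auto suspend/resume path, used by the runtime PM callbacks
 * when the host driver supplies no runtime_ops. The bus is only
 * suspended when no QMI transaction holds it, and the bandwidth vote is
 * dropped to NONE without touching current_bw_vote so resume can
 * restore the last used vote.
 */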
int cnss_auto_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	if (!pci_priv->qmi_send_usage_count) {
		ret = cnss_pci_suspend_bus(pci_priv);
		if (ret) {
			mutex_unlock(&pci_priv->bus_lock);
			return ret;
		}
	}
	cnss_pci_set_auto_suspended(pci_priv, 1);
	mutex_unlock(&pci_priv->bus_lock);

	cnss_pci_set_monitor_wake_intr(pci_priv, true);

	/* For suspend, temporarily set the bandwidth vote to NONE and don't
	 * save it in current_bw_vote, as the resume path should vote for the
	 * last used bandwidth. Also ignore errors if bandwidth voting is not
	 * set up.
	 */
	cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);

	return 0;
}
EXPORT_SYMBOL(cnss_auto_suspend);

int cnss_auto_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	ret = cnss_pci_resume_bus(pci_priv);
	if (ret) {
		mutex_unlock(&pci_priv->bus_lock);
		return ret;
	}
	cnss_pci_set_auto_suspended(pci_priv, 0);
	mutex_unlock(&pci_priv->bus_lock);

	cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);

	return 0;
}
EXPORT_SYMBOL(cnss_auto_resume);
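
/**
 * cnss_pci_force_wake_request_sync() - Assert force wake and wait for it
 * @dev: PCI device
 * @timeout_us: when non-zero, busy-wait up to this many microseconds;
 *              when zero, sleep-wait for up to mhi_ctrl->timeout_ms
 *
 * No-op (returns 0) for device IDs without force wake support.
 */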
int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	if (timeout_us) {
		/* Busy wait for timeout_us */
		return cnss_mhi_device_get_sync_atomic(pci_priv,
						       timeout_us, false);
	} else {
		/* Sleep wait for mhi_ctrl->timeout_ms */
		return mhi_device_get_sync(mhi_ctrl->mhi_dev);
	}
}
EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);

int cnss_pci_force_wake_request(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	mhi_device_get(mhi_ctrl->mhi_dev);

	return 0;
}
EXPORT_SYMBOL(cnss_pci_force_wake_request);

int cnss_pci_is_device_awake(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	return (mhi_ctrl->dev_state == MHI_STATE_M0);
}
EXPORT_SYMBOL(cnss_pci_is_device_awake);

int cnss_pci_force_wake_release(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	mhi_device_put(mhi_ctrl->mhi_dev);

	return 0;
}
EXPORT_SYMBOL(cnss_pci_force_wake_release);
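
/*
 * cnss_pci_qmi_send_get()/cnss_pci_qmi_send_put() bracket every QMI send
 * with a usage count under bus_lock: the first get resumes the bus if it
 * was auto-suspended, and the last put suspends it again (unless the
 * device is already down). cnss_auto_suspend() consults the same count,
 * so the bus is never suspended underneath an in-flight QMI transaction.
 */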
int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	if (cnss_pci_get_auto_suspended(pci_priv) &&
	    !pci_priv->qmi_send_usage_count)
		ret = cnss_pci_resume_bus(pci_priv);
	pci_priv->qmi_send_usage_count++;
	cnss_pr_buf("Increased QMI send usage count to %d\n",
		    pci_priv->qmi_send_usage_count);
	mutex_unlock(&pci_priv->bus_lock);

	return ret;
}

int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	if (pci_priv->qmi_send_usage_count)
		pci_priv->qmi_send_usage_count--;
	cnss_pr_buf("Decreased QMI send usage count to %d\n",
		    pci_priv->qmi_send_usage_count);
	if (cnss_pci_get_auto_suspended(pci_priv) &&
	    !pci_priv->qmi_send_usage_count &&
	    !cnss_pcie_is_device_down(pci_priv))
		ret = cnss_pci_suspend_bus(pci_priv);
	mutex_unlock(&pci_priv->bus_lock);

	return ret;
}
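
/*
 * AFC response data is handed to firmware through the
 * QMI_WLFW_AFC_MEM_V01 region, which is divided into AFC_MAX_SLOT slots
 * of AFC_SLOT_SIZE bytes each. The helper below copies the response into
 * the requested slot, zero-pads the remainder of the slot, and then
 * writes AFC_AUTH_SUCCESS at AFC_AUTH_STATUS_OFFSET of the slot.
 */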
int cnss_send_buffer_to_afcmem(struct device *dev, const uint8_t *afcdb,
			       uint32_t len, uint8_t slotid)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
	struct cnss_fw_mem *fw_mem;
	void *mem = NULL;
	int i, ret;
	u32 *status;

	if (!plat_priv)
		return -EINVAL;

	fw_mem = plat_priv->fw_mem;

	if (slotid >= AFC_MAX_SLOT) {
		cnss_pr_err("Invalid slot id %d\n", slotid);
		ret = -EINVAL;
		goto err;
	}
	if (len > AFC_SLOT_SIZE) {
		cnss_pr_err("len %u greater than slot size\n", len);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
			mem = fw_mem[i].va;
			status = mem + (slotid * AFC_SLOT_SIZE);
			break;
		}
	}

	if (!mem) {
		cnss_pr_err("AFC mem is not available\n");
		ret = -ENOMEM;
		goto err;
	}

	memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
	if (len < AFC_SLOT_SIZE)
		memset(mem + (slotid * AFC_SLOT_SIZE) + len,
		       0, AFC_SLOT_SIZE - len);
	status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);

	return 0;
err:
	return ret;
}
EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);
int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
	struct cnss_fw_mem *fw_mem;
	void *mem = NULL;
	int i, ret;

	if (!plat_priv)
		return -EINVAL;

	fw_mem = plat_priv->fw_mem;

	if (slotid >= AFC_MAX_SLOT) {
		cnss_pr_err("Invalid slot id %d\n", slotid);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
			mem = fw_mem[i].va;
			break;
		}
	}

	if (!mem) {
		cnss_pr_err("AFC mem is not available\n");
		ret = -ENOMEM;
		goto err;
	}

	memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);

	return 0;
err:
	return ret;
}
EXPORT_SYMBOL(cnss_reset_afcmem);
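
/*
 * FW memory segments requested over QMI may carry
 * DMA_ATTR_FORCE_CONTIGUOUS. If a contiguous allocation fails, the
 * allocator below clears that attribute and retries the same segment
 * with non-contiguous memory before giving up and asserting.
 */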
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	struct device *dev = &pci_priv->pci_dev->dev;
	int i;

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (!fw_mem[i].va && fw_mem[i].size) {
retry:
			fw_mem[i].va =
				dma_alloc_attrs(dev, fw_mem[i].size,
						&fw_mem[i].pa, GFP_KERNEL,
						fw_mem[i].attrs);
			if (!fw_mem[i].va) {
				if ((fw_mem[i].attrs &
				    DMA_ATTR_FORCE_CONTIGUOUS)) {
					fw_mem[i].attrs &=
						~DMA_ATTR_FORCE_CONTIGUOUS;
					cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
						    fw_mem[i].type);
					goto retry;
				}
				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
					    fw_mem[i].size, fw_mem[i].type);
				CNSS_ASSERT(0);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	struct device *dev = &pci_priv->pci_dev->dev;
	int i;

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].va && fw_mem[i].size) {
			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
				    fw_mem[i].va, &fw_mem[i].pa,
				    fw_mem[i].size, fw_mem[i].type);
			dma_free_attrs(dev, fw_mem[i].size,
				       fw_mem[i].va, fw_mem[i].pa,
				       fw_mem[i].attrs);
			fw_mem[i].va = NULL;
			fw_mem[i].pa = 0;
			fw_mem[i].size = 0;
			fw_mem[i].type = 0;
		}
	}

	plat_priv->fw_mem_seg_len = 0;
}
int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
	int i, j;

	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
		if (!qdss_mem[i].va && qdss_mem[i].size) {
			qdss_mem[i].va =
				dma_alloc_coherent(&pci_priv->pci_dev->dev,
						   qdss_mem[i].size,
						   &qdss_mem[i].pa,
						   GFP_KERNEL);
			if (!qdss_mem[i].va) {
				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chunk-ID: %d\n",
					    qdss_mem[i].size,
					    qdss_mem[i].type, i);
				break;
			}
		}
	}

	/* Best-effort allocation for QDSS trace: on a partial failure keep
	 * the segments allocated so far and drop the rest.
	 */
	if (i < plat_priv->qdss_mem_seg_len) {
		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
			qdss_mem[j].type = 0;
			qdss_mem[j].size = 0;
		}
		plat_priv->qdss_mem_seg_len = i;
	}

	return 0;
}
void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
	int i;

	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
		if (qdss_mem[i].va && qdss_mem[i].size) {
			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
				    &qdss_mem[i].pa, qdss_mem[i].size,
				    qdss_mem[i].type);
			dma_free_coherent(&pci_priv->pci_dev->dev,
					  qdss_mem[i].size, qdss_mem[i].va,
					  qdss_mem[i].pa);
			qdss_mem[i].va = NULL;
			qdss_mem[i].pa = 0;
			qdss_mem[i].size = 0;
			qdss_mem[i].type = 0;
		}
	}
	plat_priv->qdss_mem_seg_len = 0;
}
int cnss_pci_load_tme_patch(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
	char filename[MAX_FIRMWARE_NAME_LEN];
	char *tme_patch_filename = NULL;
	const struct firmware *fw_entry;
	int ret = 0;

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		tme_patch_filename = TME_PATCH_FILE_NAME;
		break;
	case QCA6174_DEVICE_ID:
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	default:
		cnss_pr_dbg("TME-L not supported for device ID: (0x%x)\n",
			    pci_priv->device_id);
		return 0;
	}

	if (!tme_lite_mem->va && !tme_lite_mem->size) {
		cnss_pci_add_fw_prefix_name(pci_priv, filename,
					    tme_patch_filename);

		ret = firmware_request_nowarn(&fw_entry, filename,
					      &pci_priv->pci_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to load TME-L patch: %s, ret: %d\n",
				    filename, ret);
			return ret;
		}

		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						      fw_entry->size,
						      &tme_lite_mem->pa,
						      GFP_KERNEL);
		if (!tme_lite_mem->va) {
			cnss_pr_err("Failed to allocate memory for TME-L patch, size: 0x%zx\n",
				    fw_entry->size);
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
		tme_lite_mem->size = fw_entry->size;
		release_firmware(fw_entry);
	}

	return 0;
}
static void cnss_pci_free_tme_lite_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;

	if (tme_lite_mem->va && tme_lite_mem->size) {
		cnss_pr_dbg("Freeing memory for TME patch, va: 0x%pK, pa: %pa, size: 0x%zx\n",
			    tme_lite_mem->va, &tme_lite_mem->pa,
			    tme_lite_mem->size);
		dma_free_coherent(&pci_priv->pci_dev->dev, tme_lite_mem->size,
				  tme_lite_mem->va, tme_lite_mem->pa);
	}

	tme_lite_mem->va = NULL;
	tme_lite_mem->pa = 0;
	tme_lite_mem->size = 0;
}
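
/*
 * The M3/PHY microcode blob is loaded once into a persistent DMA-coherent
 * buffer (m3_mem) and handed to firmware later; the filename depends on
 * the device ID and, for KIWI/MANGO/PEACH, on the device major version.
 */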
int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
	char filename[MAX_FIRMWARE_NAME_LEN];
	char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
	const struct firmware *fw_entry;
	int ret = 0;

	/* Use forward compatibility here since for any recent device
	 * it should use DEFAULT_PHY_UCODE_FILE_NAME.
	 */
	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
			    pci_priv->device_id);
		return -EINVAL;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
		phy_filename = DEFAULT_PHY_M3_FILE_NAME;
		break;
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		switch (plat_priv->device_version.major_version) {
		case FW_V2_NUMBER:
			phy_filename = PHY_UCODE_V2_FILE_NAME;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!m3_mem->va && !m3_mem->size) {
		cnss_pci_add_fw_prefix_name(pci_priv, filename,
					    phy_filename);

		ret = firmware_request_nowarn(&fw_entry, filename,
					      &pci_priv->pci_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to load M3 image: %s\n", filename);
			return ret;
		}

		m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						fw_entry->size, &m3_mem->pa,
						GFP_KERNEL);
		if (!m3_mem->va) {
			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
				    fw_entry->size);
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
		m3_mem->size = fw_entry->size;
		release_firmware(fw_entry);
	}

	return 0;
}

static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;

	if (m3_mem->va && m3_mem->size) {
		cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
			    m3_mem->va, &m3_mem->pa, m3_mem->size);
		dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
				  m3_mem->va, m3_mem->pa);
	}

	m3_mem->va = NULL;
	m3_mem->pa = 0;
	m3_mem->size = 0;
}

#ifdef CONFIG_FREE_M3_BLOB_MEM
void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
{
	cnss_pci_free_m3_mem(pci_priv);
}
#else
void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
{
}
#endif

int cnss_pci_load_aux(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
	char filename[MAX_FIRMWARE_NAME_LEN];
	char *aux_filename = DEFAULT_AUX_FILE_NAME;
	const struct firmware *fw_entry;
	int ret = 0;

	if (!aux_mem->va && !aux_mem->size) {
		cnss_pci_add_fw_prefix_name(pci_priv, filename,
					    aux_filename);

		ret = firmware_request_nowarn(&fw_entry, filename,
					      &pci_priv->pci_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to load AUX image: %s\n", filename);
			return ret;
		}

		aux_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						 fw_entry->size, &aux_mem->pa,
						 GFP_KERNEL);
		if (!aux_mem->va) {
			cnss_pr_err("Failed to allocate memory for AUX, size: 0x%zx\n",
				    fw_entry->size);
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		memcpy(aux_mem->va, fw_entry->data, fw_entry->size);
		aux_mem->size = fw_entry->size;
		release_firmware(fw_entry);
	}

	return 0;
}

static void cnss_pci_free_aux_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;

	if (aux_mem->va && aux_mem->size) {
		cnss_pr_dbg("Freeing memory for AUX, va: 0x%pK, pa: %pa, size: 0x%zx\n",
			    aux_mem->va, &aux_mem->pa, aux_mem->size);
		dma_free_coherent(&pci_priv->pci_dev->dev, aux_mem->size,
				  aux_mem->va, aux_mem->pa);
	}

	aux_mem->va = NULL;
	aux_mem->pa = 0;
	aux_mem->size = 0;
}

void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return;

	cnss_fatal_err("Timeout waiting for FW ready indication\n");

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return;

	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
		cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
		return;
	}

	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
			       CNSS_REASON_TIMEOUT);
}

static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
{
	pci_priv->iommu_domain = NULL;
}

int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
{
	if (!pci_priv)
		return -ENODEV;

	if (!pci_priv->smmu_iova_len)
		return -EINVAL;

	*addr = pci_priv->smmu_iova_start;
	*size = pci_priv->smmu_iova_len;

	return 0;
}

int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
{
	if (!pci_priv)
		return -ENODEV;

	if (!pci_priv->smmu_iova_ipa_len)
		return -EINVAL;

	*addr = pci_priv->smmu_iova_ipa_start;
	*size = pci_priv->smmu_iova_ipa_len;

	return 0;
}

bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv)
{
	if (pci_priv)
		return pci_priv->smmu_s1_enable;

	return false;
}

struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));

	if (!pci_priv)
		return NULL;

	return pci_priv->iommu_domain;
}
EXPORT_SYMBOL(cnss_smmu_get_domain);
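
/**
 * cnss_smmu_map() - Map a physical range into the WLAN IPA IOVA window
 * @dev: PCI device of the WLAN chip
 * @paddr: physical address to map (may be unaligned; it is rounded down
 *         to a page boundary internally)
 * @iova_addr: out parameter, receives the IOVA corresponding to @paddr
 * @size: size of the range to map
 *
 * Allocates IOVA space linearly from smmu_iova_ipa_current and maps it
 * read/write; IOMMU_CACHE is added when the PCIe root complex advertises
 * "dma-coherent". A hypothetical caller (names illustrative only):
 *
 *	uint32_t iova;
 *
 *	if (!cnss_smmu_map(&pci_dev->dev, paddr, &iova, size))
 *		program_hw_with_iova(iova);
 *
 * Return: 0 on success, negative errno on failure.
 */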
int cnss_smmu_map(struct device *dev,
		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	struct cnss_plat_data *plat_priv;
	unsigned long iova;
	size_t len;
	int ret = 0;
	int flag = IOMMU_READ | IOMMU_WRITE;
	struct pci_dev *root_port;
	struct device_node *root_of_node;
	bool dma_coherent = false;

	if (!pci_priv)
		return -ENODEV;

	if (!iova_addr) {
		cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
			    &paddr, size);
		return -EINVAL;
	}

	plat_priv = pci_priv->plat_priv;

	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
	iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);

	if (pci_priv->iommu_geometry &&
	    iova >= pci_priv->smmu_iova_ipa_start +
		    pci_priv->smmu_iova_ipa_len) {
		cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
			    iova,
			    &pci_priv->smmu_iova_ipa_start,
			    pci_priv->smmu_iova_ipa_len);
		return -ENOMEM;
	}

	if (!test_bit(DISABLE_IO_COHERENCY,
		      &plat_priv->ctrl_params.quirks)) {
		root_port = pcie_find_root_port(pci_priv->pci_dev);
		if (!root_port) {
			cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
		} else {
			root_of_node = root_port->dev.of_node;
			if (root_of_node && root_of_node->parent) {
				dma_coherent =
				    of_property_read_bool(root_of_node->parent,
							  "dma-coherent");
				cnss_pr_dbg("dma-coherent is %s\n",
					    dma_coherent ? "enabled" : "disabled");
				if (dma_coherent)
					flag |= IOMMU_CACHE;
			}
		}
	}

	cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);

	ret = iommu_map(pci_priv->iommu_domain, iova,
			rounddown(paddr, PAGE_SIZE), len, flag);
	if (ret) {
		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
		return ret;
	}

	pci_priv->smmu_iova_ipa_current = iova + len;
	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
	cnss_pr_dbg("IOMMU map: iova_addr %x\n", *iova_addr);

	return 0;
}
EXPORT_SYMBOL(cnss_smmu_map);
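
/*
 * cnss_smmu_unmap() reverses cnss_smmu_map() and, on success, rewinds
 * smmu_iova_ipa_current to the start of the unmapped range, so IOVA
 * space is effectively reclaimed only when mappings are released in
 * LIFO order.
 */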
int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	unsigned long iova;
	size_t unmapped;
	size_t len;

	if (!pci_priv)
		return -ENODEV;

	iova = rounddown(iova_addr, PAGE_SIZE);
	len = roundup(size + iova_addr - iova, PAGE_SIZE);

	if (iova >= pci_priv->smmu_iova_ipa_start +
		    pci_priv->smmu_iova_ipa_len) {
		cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
			    iova,
			    &pci_priv->smmu_iova_ipa_start,
			    pci_priv->smmu_iova_ipa_len);
		return -ENOMEM;
	}

	cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);

	unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
	if (unmapped != len) {
		cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
			    unmapped, len);
		return -EINVAL;
	}

	pci_priv->smmu_iova_ipa_current = iova;
	return 0;
}
EXPORT_SYMBOL(cnss_smmu_unmap);

int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	info->va = pci_priv->bar;
	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
	info->chip_id = plat_priv->chip_info.chip_id;
	info->chip_family = plat_priv->chip_info.chip_family;
	info->board_id = plat_priv->board_info.board_id;
	info->soc_id = plat_priv->soc_info.soc_id;
	info->fw_version = plat_priv->fw_version_info.fw_version;
	strlcpy(info->fw_build_timestamp,
		plat_priv->fw_version_info.fw_build_timestamp,
		sizeof(info->fw_build_timestamp));
	memcpy(&info->device_version, &plat_priv->device_version,
	       sizeof(info->device_version));
	memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
	       sizeof(info->dev_mem_info));
	memcpy(&info->fw_build_id, &plat_priv->fw_build_id,
	       sizeof(info->fw_build_id));

	return 0;
}
EXPORT_SYMBOL(cnss_get_soc_info);

int cnss_pci_get_user_msi_assignment(struct cnss_pci_data *pci_priv,
				     char *user_name,
				     int *num_vectors,
				     u32 *user_base_data,
				     u32 *base_vector)
{
	return cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
					    user_name,
					    num_vectors,
					    user_base_data,
					    base_vector);
}

static int cnss_pci_irq_set_affinity_hint(struct cnss_pci_data *pci_priv,
					  unsigned int vec,
					  const struct cpumask *cpumask)
{
	int ret;
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	ret = irq_set_affinity_hint(pci_irq_vector(pci_dev, vec),
				    cpumask);

	return ret;
}
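
/*
 * MSI bring-up: resolve the MSI vector layout (optionally forcing a
 * single vector), allocate exactly total_vectors with
 * pci_alloc_irq_vectors(), pin the lone vector to CPU0 in one-MSI mode,
 * and finally program the MSI address/data registers consumed by
 * firmware.
 */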
static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int num_vectors;
	struct cnss_msi_config *msi_config;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	if (cnss_pci_is_force_one_msi(pci_priv)) {
		ret = cnss_pci_get_one_msi_assignment(pci_priv);
		cnss_pr_dbg("force one msi\n");
	} else {
		ret = cnss_pci_get_msi_assignment(pci_priv);
	}
	if (ret) {
		cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
		goto out;
	}

	msi_config = pci_priv->msi_config;
	if (!msi_config) {
		cnss_pr_err("msi_config is NULL!\n");
		ret = -EINVAL;
		goto out;
	}

	num_vectors = pci_alloc_irq_vectors(pci_dev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if ((num_vectors != msi_config->total_vectors) &&
	    !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) {
		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d\n",
			    msi_config->total_vectors, num_vectors);
		/* Propagate the allocation error instead of returning 0 */
		if (num_vectors >= 0)
			ret = -EINVAL;
		else
			ret = num_vectors;
		goto reset_msi_config;
	}

	/* With VT-d disabled on x86 platforms, only one PCI IRQ vector is
	 * allocated. Across a suspend/resume cycle that IRQ may be migrated
	 * to CPU0 if it was affine to another CPU when the MSI vector is
	 * re-allocated, leaving the vector without an IRQ handler after
	 * resume. Set the IRQ vector affinity to CPU0 before calling
	 * request_irq to avoid that migration.
	 */
	if (cnss_pci_is_one_msi(pci_priv)) {
		ret = cnss_pci_irq_set_affinity_hint(pci_priv,
						     0,
						     cpumask_of(0));
		if (ret) {
			cnss_pr_err("Failed to affinize irq vector to CPU0\n");
			goto free_msi_vector;
		}
	}

	if (cnss_pci_config_msi_addr(pci_priv)) {
		ret = -EINVAL;
		goto free_msi_vector;
	}

	if (cnss_pci_config_msi_data(pci_priv)) {
		ret = -EINVAL;
		goto free_msi_vector;
	}

	return 0;

free_msi_vector:
	if (cnss_pci_is_one_msi(pci_priv))
		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
	pci_free_irq_vectors(pci_priv->pci_dev);

reset_msi_config:
	pci_priv->msi_config = NULL;
out:
	return ret;
}
static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return;

	if (cnss_pci_is_one_msi(pci_priv))
		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);

	pci_free_irq_vectors(pci_priv->pci_dev);
}
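
/**
 * cnss_get_user_msi_assignment() - Look up the MSI block assigned to a user
 * @dev: PCI device of the WLAN chip
 * @user_name: MSI user name to match against the msi_config user table
 *             (e.g. WAKE_MSI_NAME, used by cnss_pci_get_wake_msi() below)
 * @num_vectors: out, number of vectors assigned to the user
 * @user_base_data: out, the user's base vector offset by the device's MSI
 *                  endpoint base data
 * @base_vector: out, index of the user's first vector
 *
 * Return: 0 on success, negative errno if MSI is unsupported or the user
 * name is unknown.
 */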
int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
				 int *num_vectors, u32 *user_base_data,
				 u32 *base_vector)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	struct cnss_msi_config *msi_config;
	int idx;

	if (!pci_priv)
		return -ENODEV;

	msi_config = pci_priv->msi_config;
	if (!msi_config) {
		cnss_pr_err("MSI is not supported.\n");
		return -EINVAL;
	}

	for (idx = 0; idx < msi_config->total_users; idx++) {
		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
			*num_vectors = msi_config->users[idx].num_vectors;
			*user_base_data = msi_config->users[idx].base_vector
				+ pci_priv->msi_ep_base_data;
			*base_vector = msi_config->users[idx].base_vector;

			/* Add only a single print for each user */
			if (print_optimize.msi_log_chk[idx]++)
				goto skip_print;

			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
				    user_name, *num_vectors, *user_base_data,
				    *base_vector);
skip_print:
			return 0;
		}
	}

	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}
EXPORT_SYMBOL(cnss_get_user_msi_assignment);
int cnss_get_msi_irq(struct device *dev, unsigned int vector)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int irq_num;

	irq_num = pci_irq_vector(pci_dev, vector);
	cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);

	return irq_num;
}
EXPORT_SYMBOL(cnss_get_msi_irq);

bool cnss_is_one_msi(struct device *dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));

	if (!pci_priv)
		return false;

	return cnss_pci_is_one_msi(pci_priv);
}
EXPORT_SYMBOL(cnss_is_one_msi);
void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
			  u32 *msi_addr_high)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv;
	u16 control;

	if (!pci_dev)
		return;

	pci_priv = cnss_get_pci_priv(pci_dev);
	if (!pci_priv)
		return;

	if (pci_dev->msix_enabled) {
		*msi_addr_low = pci_priv->msix_addr;
		*msi_addr_high = 0;
		if (!print_optimize.msi_addr_chk++)
			cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
				    *msi_addr_low, *msi_addr_high);
		return;
	}

	pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
			     &control);
	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_low);
	/* Return MSI high address only when device supports 64-bit MSI */
	if (control & PCI_MSI_FLAGS_64BIT)
		pci_read_config_dword(pci_dev,
				      pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      msi_addr_high);
	else
		*msi_addr_high = 0;

	/* Add only a single print as the address is constant */
	if (!print_optimize.msi_addr_chk++)
		cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
			    *msi_addr_low, *msi_addr_high);
}
EXPORT_SYMBOL(cnss_get_msi_address);
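
/*
 * cnss_pci_get_wake_msi() resolves the MSI data value for the dedicated
 * wake vector by querying the WAKE_MSI_NAME user assignment; a return of
 * 0 means no valid wake MSI is available.
 */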
u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
{
	int ret, num_vectors;
	u32 user_base_data, base_vector;

	if (!pci_priv)
		return -ENODEV;

	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
					   WAKE_MSI_NAME, &num_vectors,
					   &user_base_data, &base_vector);
	if (ret) {
		cnss_pr_err("WAKE MSI is not valid\n");
		return 0;
	}

	return user_base_data;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
{
	return dma_set_mask(&pci_dev->dev, mask);
}

static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
						 u64 mask)
{
	return dma_set_coherent_mask(&pci_dev->dev, mask);
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
{
	return pci_set_dma_mask(pci_dev, mask);
}

static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
						 u64 mask)
{
	return pci_set_consistent_dma_mask(pci_dev, mask);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
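
/*
 * Bus bring-up sequence: verify the device ID read from config space,
 * assign and enable the BAR, set the per-chip DMA masks (32-bit vs
 * 36-bit), iomap the BAR, and save the pristine config space (before bus
 * mastering is enabled) as the default state used for later restores.
 */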
static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	u16 device_id;

	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
	if (device_id != pci_priv->pci_device_id->device) {
		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
			    device_id, pci_priv->pci_device_id->device);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
	if (ret) {
		cnss_pr_err("Failed to assign PCI resource, err = %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
	if (ret) {
		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
		goto disable_device;
	}

	switch (device_id) {
	case QCA6174_DEVICE_ID:
	case QCN7605_DEVICE_ID:
		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
		break;
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
		break;
	default:
		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
		break;
	}

	cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask);

	ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
	if (ret) {
		cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
		goto release_region;
	}

	ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
	if (ret) {
		cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n",
			    ret);
		goto release_region;
	}

	pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
	if (!pci_priv->bar) {
		cnss_pr_err("Failed to do PCI IO map!\n");
		ret = -EIO;
		goto release_region;
	}

	/* Save default config space without BME enabled */
	pci_save_state(pci_dev);
	pci_priv->default_state = pci_store_saved_state(pci_dev);

	pci_set_master(pci_dev);

	return 0;

release_region:
	pci_release_region(pci_dev, PCI_BAR_NUM);
disable_device:
	pci_disable_device(pci_dev);
out:
	return ret;
}
static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	pci_clear_master(pci_dev);
	pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
	pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);

	if (pci_priv->bar) {
		pci_iounmap(pci_dev, pci_priv->bar);
		pci_priv->bar = NULL;
	}

	pci_release_region(pci_dev, PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}

static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
	gfp_t gfp = GFP_KERNEL;
	u32 reg_offset;

	if (in_interrupt() || irqs_disabled())
		gfp = GFP_ATOMIC;

	if (!plat_priv->qdss_reg) {
		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
						   sizeof(*plat_priv->qdss_reg)
						   * array_size, gfp);
		if (!plat_priv->qdss_reg)
			return;
	}

	cnss_pr_dbg("Start to dump qdss registers\n");

	for (i = 0; qdss_csr[i].name; i++) {
		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset,
				      &plat_priv->qdss_reg[i]))
			return;
		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
			    plat_priv->qdss_reg[i]);
	}
}

static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
				 enum cnss_ce_index ce)
{
	int i;
	u32 ce_base = ce * CE_REG_INTERVAL;
	u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
		dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
		cmn_base = QCA6390_CE_COMMON_REG_BASE;
		break;
	case QCA6490_DEVICE_ID:
		src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
		dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
		cmn_base = QCA6490_CE_COMMON_REG_BASE;
		break;
	default:
		return;
	}

	switch (ce) {
	case CNSS_CE_09:
	case CNSS_CE_10:
		for (i = 0; ce_src[i].name; i++) {
			reg_offset = src_ring_base + ce_base + ce_src[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
				    ce, ce_src[i].name, reg_offset, val);
		}

		for (i = 0; ce_dst[i].name; i++) {
			reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
				    ce, ce_dst[i].name, reg_offset, val);
		}
		break;
	case CNSS_CE_COMMON:
		for (i = 0; ce_cmn[i].name; i++) {
			reg_offset = cmn_base + ce_cmn[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
				    ce_cmn[i].name, reg_offset, val);
		}
		break;
	default:
		cnss_pr_err("Unsupported CE[%d] registers dump\n", ce);
	}
}

static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
{
	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump debug registers\n");

	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
}

static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv)
{
	if (cnss_get_host_sol_value(pci_priv->plat_priv))
		return -EINVAL;

	cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n");
	cnss_set_host_sol_value(pci_priv->plat_priv, 1);

	return 0;
}

static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv)
{
	if (!cnss_pci_check_link_status(pci_priv))
		cnss_mhi_debug_reg_dump(pci_priv);

	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_misc_reg(pci_priv);
	cnss_pci_dump_shadow_reg(pci_priv);
}
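
/*
 * Force-assert flow: take a runtime PM reference so runtime suspend
 * cannot race with the assert, resume the bus if needed, dump MHI
 * registers, then either trigger RDDM over MHI or, if that fails, fall
 * back to the host SOL GPIO and finally to scheduling full recovery.
 */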
int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
{
	int ret;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
		return -EINVAL;
	/*
	 * Call pm_runtime_get_sync instead of auto_resume to get a
	 * reference and make sure runtime_suspend won't get called.
	 */
	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
	if (ret < 0)
		goto runtime_pm_put;
	/*
	 * In some scenarios, cnss_pci_pm_runtime_get_sync
	 * might not resume the PCI bus. For those cases do auto resume.
	 */
	cnss_auto_resume(&pci_priv->pci_dev->dev);

	if (!pci_priv->is_smmu_fault)
		cnss_pci_mhi_reg_dump(pci_priv);

	/* If link is still down here, directly trigger link down recovery */
	ret = cnss_pci_check_link_status(pci_priv);
	if (ret) {
		cnss_pci_link_down(&pci_priv->pci_dev->dev);
		cnss_pci_pm_runtime_mark_last_busy(pci_priv);
		cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
		return 0;
	}

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
	if (ret) {
		if (pci_priv->is_smmu_fault) {
			cnss_pci_mhi_reg_dump(pci_priv);
			pci_priv->is_smmu_fault = false;
		}
		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
			return 0;
		}
		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
		if (!cnss_pci_assert_host_sol(pci_priv)) {
			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
			return 0;
		}
		cnss_pci_dump_debug_reg(pci_priv);
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_DEFAULT);
		goto runtime_pm_put;
	}

	if (pci_priv->is_smmu_fault) {
		cnss_pci_mhi_reg_dump(pci_priv);
		pci_priv->is_smmu_fault = false;
	}

	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
		mod_timer(&pci_priv->dev_rddm_timer,
			  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
	}

runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
	return ret;
}
static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
				  struct cnss_dump_seg *dump_seg,
				  enum cnss_fw_dump_type type, int seg_no,
				  void *va, dma_addr_t dma, size_t size)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &pci_priv->pci_dev->dev;
	phys_addr_t pa;

	dump_seg->address = dma;
	dump_seg->v_address = va;
	dump_seg->size = size;
	dump_seg->type = type;

	cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
		    seg_no, va, &dma, size);

	if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
		return;

	cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
}

static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
				     struct cnss_dump_seg *dump_seg,
				     enum cnss_fw_dump_type type, int seg_no,
				     void *va, dma_addr_t dma, size_t size)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &pci_priv->pci_dev->dev;
	phys_addr_t pa;

	cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
	cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
}

int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
				enum cnss_driver_status status, void *data)
{
	struct cnss_uevent_data uevent_data;
	struct cnss_wlan_driver *driver_ops;

	driver_ops = pci_priv->driver_ops;
	if (!driver_ops || !driver_ops->update_event) {
		cnss_pr_dbg("Hang event driver ops is NULL\n");
		return -EINVAL;
	}

	cnss_pr_dbg("Calling driver uevent: %d\n", status);

	uevent_data.status = status;
	uevent_data.data = data;

	return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
}
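
/*
 * On a device-error notification the hang-event blob is carved out of
 * the DDR FW memory segment at a per-chip offset/length (or at values
 * supplied by firmware over QMI) and forwarded to the registered WLAN
 * driver through the CNSS_HANG_EVENT uevent.
 */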
static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	struct cnss_hang_event hang_event;
	void *hang_data_va = NULL;
	u64 offset = 0;
	u16 length = 0;
	int i = 0;

	if (!fw_mem || !plat_priv->fw_mem_seg_len)
		return;

	memset(&hang_event, 0, sizeof(hang_event));
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		offset = HST_HANG_DATA_OFFSET;
		length = HANG_DATA_LENGTH;
		break;
	case QCA6490_DEVICE_ID:
		/* Fall back to hard-coded values if hang event params are
		 * not present in QMI. Once all the firmware branches have
		 * the fix to send params over QMI, this can be removed.
		 */
		if (plat_priv->hang_event_data_len) {
			offset = plat_priv->hang_data_addr_offset;
			length = plat_priv->hang_event_data_len;
		} else {
			offset = HSP_HANG_DATA_OFFSET;
			length = HANG_DATA_LENGTH;
		}
		break;
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		offset = plat_priv->hang_data_addr_offset;
		length = plat_priv->hang_event_data_len;
		break;
	default:
		cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n",
			    pci_priv->device_id);
		return;
	}

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
		    fw_mem[i].va) {
			/* The offset must not exceed (fw_mem size - hang
			 * data length).
			 */
			if (offset > fw_mem[i].size - length)
				goto exit;

			hang_data_va = fw_mem[i].va + offset;
			hang_event.hang_event_data = kmemdup(hang_data_va,
							     length,
							     GFP_ATOMIC);
			if (!hang_event.hang_event_data) {
				cnss_pr_dbg("Hang data memory alloc failed\n");
				return;
			}
			hang_event.hang_event_data_len = length;
			break;
		}
	}

	cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);

	kfree(hang_event.hang_event_data);
	hang_event.hang_event_data = NULL;
	return;
exit:
	cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n",
		    plat_priv->hang_data_addr_offset,
		    plat_priv->hang_event_data_len);
}
#ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
void cnss_pci_collect_host_dump_info(struct cnss_pci_data *pci_priv)
{
	struct cnss_ssr_driver_dump_entry ssr_entry[CNSS_HOST_DUMP_TYPE_MAX] = {0};
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	size_t num_entries_loaded = 0;
	int x;
	int ret = -1;

	if (pci_priv->driver_ops &&
	    pci_priv->driver_ops->collect_driver_dump) {
		ret = pci_priv->driver_ops->collect_driver_dump(pci_priv->pci_dev,
								ssr_entry,
								&num_entries_loaded);
	}

	if (!ret) {
		for (x = 0; x < num_entries_loaded; x++) {
			cnss_pr_info("Idx:%d, ptr: %p, name: %s, size: %d\n",
				     x, ssr_entry[x].buffer_pointer,
				     ssr_entry[x].region_name,
				     ssr_entry[x].buffer_size);
		}

		cnss_do_host_ramdump(plat_priv, ssr_entry, num_entries_loaded);
	} else {
		cnss_pr_info("Host SSR elf dump collection feature disabled\n");
	}
}
#endif
void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_dump_data *dump_data =
		&plat_priv->ramdump_info_v2.dump_data;
	struct cnss_dump_seg *dump_seg =
		plat_priv->ramdump_info_v2.dump_data_vaddr;
	struct image_info *fw_image, *rddm_image;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	int ret, i, j;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
	    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
		cnss_pci_send_hang_event(pci_priv);

	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
		cnss_pr_dbg("RAM dump is already collected, skip\n");
		return;
	}

	if (!cnss_is_device_powered_on(plat_priv)) {
		cnss_pr_dbg("Device is already powered off, skip\n");
		return;
	}

	if (!in_panic) {
		mutex_lock(&pci_priv->bus_lock);
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret) {
			if (ret != -EACCES) {
				mutex_unlock(&pci_priv->bus_lock);
				return;
			}
			if (cnss_pci_resume_bus(pci_priv)) {
				mutex_unlock(&pci_priv->bus_lock);
				return;
			}
		}
		mutex_unlock(&pci_priv->bus_lock);
	} else {
		if (cnss_pci_check_link_status(pci_priv))
			return;
		/* Inside panic handler, reduce timeout for RDDM to avoid
		 * unnecessary hypervisor watchdog bite.
		 */
		pci_priv->mhi_ctrl->timeout_ms /= 2;
	}

	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_misc_reg(pci_priv);

	cnss_rddm_trigger_debug(pci_priv);
	ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
	if (ret) {
		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
			       ret);
		if (!cnss_pci_assert_host_sol(pci_priv))
			return;
		cnss_rddm_trigger_check(pci_priv);
		cnss_pci_dump_debug_reg(pci_priv);
		return;
	}
	cnss_rddm_trigger_check(pci_priv);

	fw_image = pci_priv->mhi_ctrl->fbc_image;
	rddm_image = pci_priv->mhi_ctrl->rddm_image;
	dump_data->nentries = 0;

	if (plat_priv->qdss_mem_seg_len)
		cnss_pci_dump_qdss_reg(pci_priv);
	cnss_mhi_dump_sfr(pci_priv);

	if (!dump_seg) {
		cnss_pr_warn("FW image dump collection not setup\n");
		goto skip_dump;
	}

	cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
		    fw_image->entries);

	for (i = 0; i < fw_image->entries; i++) {
		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
				      fw_image->mhi_buf[i].buf,
				      fw_image->mhi_buf[i].dma_addr,
				      fw_image->mhi_buf[i].len);
		dump_seg++;
	}

	dump_data->nentries += fw_image->entries;

	cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
		    rddm_image->entries);

	for (i = 0; i < rddm_image->entries; i++) {
		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
				      rddm_image->mhi_buf[i].buf,
				      rddm_image->mhi_buf[i].dma_addr,
				      rddm_image->mhi_buf[i].len);
		dump_seg++;
	}

	dump_data->nentries += rddm_image->entries;

	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
			if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
				cnss_pr_dbg("Collect remote heap dump segment\n");
				cnss_pci_add_dump_seg(pci_priv, dump_seg,
						      CNSS_FW_REMOTE_HEAP, j,
						      fw_mem[i].va,
						      fw_mem[i].pa,
						      fw_mem[i].size);
				dump_seg++;
				dump_data->nentries++;
				j++;
			} else {
				cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
			}
		}
	}

	if (dump_data->nentries > 0)
		plat_priv->ramdump_info_v2.dump_data_valid = true;

	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);

skip_dump:
	complete(&plat_priv->rddm_complete);
}
void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_dump_seg *dump_seg =
		plat_priv->ramdump_info_v2.dump_data_vaddr;
	struct image_info *fw_image, *rddm_image;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	int i, j;

	if (!dump_seg)
		return;

	fw_image = pci_priv->mhi_ctrl->fbc_image;
	rddm_image = pci_priv->mhi_ctrl->rddm_image;

	for (i = 0; i < fw_image->entries; i++) {
		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
					 fw_image->mhi_buf[i].buf,
					 fw_image->mhi_buf[i].dma_addr,
					 fw_image->mhi_buf[i].len);
		dump_seg++;
	}

	for (i = 0; i < rddm_image->entries; i++) {
		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
					 rddm_image->mhi_buf[i].buf,
					 rddm_image->mhi_buf[i].dma_addr,
					 rddm_image->mhi_buf[i].len);
		dump_seg++;
	}

	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
		    (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
						 CNSS_FW_REMOTE_HEAP, j,
						 fw_mem[i].va, fw_mem[i].pa,
						 fw_mem[i].size);
			dump_seg++;
			j++;
		}
	}

	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
	plat_priv->ramdump_info_v2.dump_data_valid = false;
}

void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return;
	}

	if (plat_priv->recovery_enabled)
		cnss_pci_collect_host_dump_info(pci_priv);

	/* Call recovery handler in the DRIVER_RECOVERY event context
	 * instead of scheduling work. In that way complete recovery
	 * will be done as part of DRIVER_RECOVERY event and get
	 * serialized with other events.
	 */
	cnss_recovery_handler(plat_priv);
}

static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);

	return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
}

static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);

	cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
}
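
/*
 * Firmware files are optionally namespaced by a per-chip directory
 * prefix (the *_PATH_PREFIX macros) when use_fw_path_with_prefix is set,
 * so firmware for multiple chip variants can ship side by side.
 */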
void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
				 char *prefix_name, char *name)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return;

	plat_priv = pci_priv->plat_priv;

	if (!plat_priv->use_fw_path_with_prefix) {
		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
		return;
	}

	switch (pci_priv->device_id) {
	case QCN7605_DEVICE_ID:
		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
			  QCN7605_PATH_PREFIX "%s", name);
		break;
	case QCA6390_DEVICE_ID:
		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
			  QCA6390_PATH_PREFIX "%s", name);
		break;
	case QCA6490_DEVICE_ID:
		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
			  QCA6490_PATH_PREFIX "%s", name);
		break;
	case KIWI_DEVICE_ID:
		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
			  KIWI_PATH_PREFIX "%s", name);
		break;
	case MANGO_DEVICE_ID:
		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
			  MANGO_PATH_PREFIX "%s", name);
		break;
	case PEACH_DEVICE_ID:
		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
			  PEACH_PATH_PREFIX "%s", name);
		break;
	default:
		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
		break;
	}

	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
}
static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
				    pci_priv->device_id,
				    plat_priv->device_version.major_version);
			return -EINVAL;
		}
		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
					    FW_V2_FILE_NAME);
		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
			 FW_V2_FILE_NAME);
		break;
	case QCA6490_DEVICE_ID:
		switch (plat_priv->device_version.major_version) {
		case FW_V2_NUMBER:
			cnss_pci_add_fw_prefix_name(pci_priv,
						    plat_priv->firmware_name,
						    FW_V2_FILE_NAME);
			snprintf(plat_priv->fw_fallback_name,
				 MAX_FIRMWARE_NAME_LEN,
				 FW_V2_FILE_NAME);
			break;
		default:
			cnss_pci_add_fw_prefix_name(pci_priv,
						    plat_priv->firmware_name,
						    DEFAULT_FW_FILE_NAME);
			snprintf(plat_priv->fw_fallback_name,
				 MAX_FIRMWARE_NAME_LEN,
				 DEFAULT_FW_FILE_NAME);
			break;
		}
		break;
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		switch (plat_priv->device_version.major_version) {
		case FW_V2_NUMBER:
			/*
			 * kiwi v2 uses separate FW binaries for mission mode
			 * and FTM mode; the platform driver loads the binary
			 * that matches the current mode indicated by the wlan
			 * driver, otherwise the default binary is used.
			 * Mission mode uses the same binary name as before;
			 * if a separate binary is not there, fall back to
			 * the default.
			 */
			if (plat_priv->driver_mode == CNSS_MISSION) {
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->firmware_name,
							    FW_V2_FILE_NAME);
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->fw_fallback_name,
							    FW_V2_FILE_NAME);
			} else if (plat_priv->driver_mode == CNSS_FTM) {
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->firmware_name,
							    FW_V2_FTM_FILE_NAME);
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->fw_fallback_name,
							    FW_V2_FILE_NAME);
			} else {
				/*
				 * During the cold boot calibration phase the
				 * wlan driver has not registered yet, so the
				 * default fw binary is used.
				 */
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->firmware_name,
							    FW_V2_FILE_NAME);
				snprintf(plat_priv->fw_fallback_name,
					 MAX_FIRMWARE_NAME_LEN,
					 FW_V2_FILE_NAME);
			}
			break;
		default:
			cnss_pci_add_fw_prefix_name(pci_priv,
						    plat_priv->firmware_name,
						    DEFAULT_FW_FILE_NAME);
			snprintf(plat_priv->fw_fallback_name,
				 MAX_FIRMWARE_NAME_LEN,
				 DEFAULT_FW_FILE_NAME);
			break;
		}
		break;
	default:
		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
					    DEFAULT_FW_FILE_NAME);
		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
			 DEFAULT_FW_FILE_NAME);
		break;
	}

	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
		    plat_priv->firmware_name, plat_priv->fw_fallback_name);

	return 0;
}
static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
{
	switch (status) {
	case MHI_CB_IDLE:
		return "IDLE";
	case MHI_CB_EE_RDDM:
		return "RDDM";
	case MHI_CB_SYS_ERROR:
		return "SYS_ERROR";
	case MHI_CB_FATAL_ERROR:
		return "FATAL_ERROR";
	case MHI_CB_EE_MISSION_MODE:
		return "MISSION_MODE";
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	case MHI_CB_FALLBACK_IMG:
		return "FW_FALLBACK";
#endif
	default:
		return "UNKNOWN";
	}
}

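/**
 * cnss_dev_rddm_timeout_hdlr() - Timer handler for missing RDDM notification
 * @t: timer context used to retrieve the PCI bus context pointer
 *
 * Fires when the device does not signal RDDM in time. If asserting the
 * host SOL line succeeds, return and let that path drive recovery;
 * otherwise schedule RDDM-based recovery when the device has already
 * entered the RDDM execution environment, or dump debug registers and
 * schedule timeout-based recovery.
 */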
static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
{
	struct cnss_pci_data *pci_priv =
		from_timer(pci_priv, t, dev_rddm_timer);
	enum mhi_ee_type mhi_ee;

	if (!pci_priv)
		return;

	cnss_fatal_err("Timeout waiting for RDDM notification\n");

	if (!cnss_pci_assert_host_sol(pci_priv))
		return;

	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (mhi_ee == MHI_EE_PBL)
		cnss_pr_err("Unable to collect ramdumps due to abrupt reset\n");

	if (mhi_ee == MHI_EE_RDDM) {
		cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_RDDM);
	} else {
		cnss_mhi_debug_reg_dump(pci_priv);
		cnss_pci_soc_scratch_reg_dump(pci_priv);
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_TIMEOUT);
	}
}

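/**
 * cnss_boot_debug_timeout_hdlr() - Periodic boot debug dump during power on
 * @t: timer context used to retrieve the PCI bus context pointer
 *
 * While MHI power on is still in progress, dump MHI/PBL/SBL debug data
 * every BOOT_DEBUG_TIMEOUT_MS and re-arm the timer. Bails out once the
 * link is down, the device is down, power on has completed, or an RDDM
 * cookie is found.
 */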
static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
{
	struct cnss_pci_data *pci_priv =
		from_timer(pci_priv, t, boot_debug_timer);

	if (!pci_priv)
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
		return;

	if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
		return;

	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
		return;

	cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
		    BOOT_DEBUG_TIMEOUT_MS / 1000);
	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_bl_sram_mem(pci_priv);

	mod_timer(&pci_priv->boot_debug_timer,
		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
}

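/**
 * cnss_pci_handle_mhi_sys_err() - Handle an MHI SYS_ERROR condition
 * @pci_priv: driver PCI bus context pointer
 *
 * Mark firmware as down, stop the FW boot timer and arm the RDDM timer
 * so that recovery is triggered if the device never reaches RDDM.
 *
 * Return: 0 always.
 */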
static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	cnss_ignore_qmi_failure(true);
	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
	del_timer(&plat_priv->fw_boot_timer);
	mod_timer(&pci_priv->dev_rddm_timer,
		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);

	return 0;
}

int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv)
{
	return cnss_pci_handle_mhi_sys_err(pci_priv);
}

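/**
 * cnss_mhi_notify_status() - MHI status callback
 * @mhi_ctrl: MHI controller
 * @reason: MHI callback reason
 *
 * Translate MHI callback reasons into CNSS recovery actions: fatal
 * errors and RDDM mark firmware down and schedule recovery, SYS_ERROR
 * arms the RDDM timer, and FW fallback image notifications switch to
 * firmware names without a path prefix.
 */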
static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
				   enum mhi_callback reason)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
	struct cnss_plat_data *plat_priv;
	enum cnss_recovery_reason cnss_reason;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return;
	}

	plat_priv = pci_priv->plat_priv;

	if (reason != MHI_CB_IDLE)
		cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
			    cnss_mhi_notify_status_to_str(reason), reason);

	switch (reason) {
	case MHI_CB_IDLE:
	case MHI_CB_EE_MISSION_MODE:
		return;
	case MHI_CB_FATAL_ERROR:
		cnss_ignore_qmi_failure(true);
		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
		del_timer(&plat_priv->fw_boot_timer);
		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
		cnss_reason = CNSS_REASON_DEFAULT;
		break;
	case MHI_CB_SYS_ERROR:
		cnss_pci_handle_mhi_sys_err(pci_priv);
		return;
	case MHI_CB_EE_RDDM:
		cnss_ignore_qmi_failure(true);
		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
		del_timer(&plat_priv->fw_boot_timer);
		del_timer(&pci_priv->dev_rddm_timer);
		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
		cnss_reason = CNSS_REASON_RDDM;
		break;
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	case MHI_CB_FALLBACK_IMG:
		/* KIWI V2 uses binary fallback, so skip path fallback here */
		if (!(pci_priv->device_id == KIWI_DEVICE_ID &&
		      plat_priv->device_version.major_version == FW_V2_NUMBER)) {
			plat_priv->use_fw_path_with_prefix = false;
			cnss_pci_update_fw_name(pci_priv);
		}
		return;
#endif
	default:
		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
		return;
	}

	cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
}

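/**
 * cnss_pci_get_mhi_msi() - Hand the MHI MSI vectors to the MHI controller
 * @pci_priv: driver PCI bus context pointer
 *
 * Query the MSI assignment for MHI, allocate an IRQ array for the
 * assigned vectors (all entries share the base vector in one-MSI mode)
 * and install it in the MHI controller.
 *
 * Return: 0 on success, negative errno on failure.
 */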
static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
{
	int ret, num_vectors, i;
	u32 user_base_data, base_vector;
	int *irq;
	unsigned int msi_data;
	bool is_one_msi = false;

	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
					   MHI_MSI_NAME, &num_vectors,
					   &user_base_data, &base_vector);
	if (ret)
		return ret;

	if (cnss_pci_is_one_msi(pci_priv)) {
		is_one_msi = true;
		num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv);
	}

	cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
		    num_vectors, base_vector);

	irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	for (i = 0; i < num_vectors; i++) {
		msi_data = base_vector;
		if (!is_one_msi)
			msi_data += i;
		irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data);
	}

	pci_priv->mhi_ctrl->irq = irq;
	pci_priv->mhi_ctrl->nr_irqs = num_vectors;

	return 0;
}

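/**
 * cnss_mhi_bw_scale() - Handle a device-requested link bandwidth change
 * @mhi_ctrl: MHI controller
 * @link_info: requested target link speed and width
 *
 * Raise the maximum RC link speed first so the subsequent bandwidth
 * request is not rejected, then apply the requested speed and width and
 * record them as the default link parameters.
 *
 * Return: 0 on success, negative errno on failure.
 */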
static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
			     struct mhi_link_info *link_info)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int ret = 0;

	cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
		    link_info->target_link_speed,
		    link_info->target_link_width);

	/* The target link speed has to be set before the link bandwidth
	 * when the device requests a link speed change. This avoids the
	 * bandwidth request getting rejected when the requested link speed
	 * is higher than the current one.
	 */
	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
					  link_info->target_link_speed);
	if (ret)
		cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
			    link_info->target_link_speed, ret);

	ret = cnss_pci_set_link_bandwidth(pci_priv,
					  link_info->target_link_speed,
					  link_info->target_link_width);
	if (ret) {
		cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
		return ret;
	}

	pci_priv->def_link_speed = link_info->target_link_speed;
	pci_priv->def_link_width = link_info->target_link_width;

	return 0;
}

static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
			     void __iomem *addr, u32 *out)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
	u32 tmp = readl_relaxed(addr);

	/* Unexpected value, query the link status */
	if (PCI_INVALID_READ(tmp) &&
	    cnss_pci_check_link_status(pci_priv))
		return -EIO;

	*out = tmp;

	return 0;
}

static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
			       void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);
}

static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
				 struct mhi_controller *mhi_ctrl)
{
	int ret = 0;

	ret = mhi_get_soc_info(mhi_ctrl);
	if (ret)
		goto exit;

	plat_priv->device_version.family_number = mhi_ctrl->family_number;
	plat_priv->device_version.device_number = mhi_ctrl->device_number;
	plat_priv->device_version.major_version = mhi_ctrl->major_version;
	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;

	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
		    plat_priv->device_version.family_number,
		    plat_priv->device_version.device_number,
		    plat_priv->device_version.major_version,
		    plat_priv->device_version.minor_version);

	/* Only keep lower 4 bits as real device major version */
	plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;

exit:
	return ret;
}

static bool cnss_is_tme_supported(struct cnss_pci_data *pci_priv)
{
	if (!pci_priv) {
		cnss_pr_dbg("pci_priv is NULL\n");
		return false;
	}

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		return true;
	default:
		return false;
	}
}

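/**
 * cnss_pci_register_mhi() - Allocate and register the MHI controller
 * @pci_priv: driver PCI bus context pointer
 *
 * Allocate an MHI controller, wire up register space, MSI vectors, IOVA
 * range and callbacks, pick the MHI channel configuration matching the
 * device, register with the MHI bus and finally resolve the firmware
 * names. QCA6174 does not use MHI, so it is skipped.
 *
 * Return: 0 on success, negative errno on failure.
 */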
static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct mhi_controller *mhi_ctrl;
	phys_addr_t bar_start;
	const struct mhi_controller_config *cnss_mhi_config =
		&cnss_mhi_config_default;

	ret = cnss_qmi_init(plat_priv);
	if (ret)
		return -EINVAL;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	mhi_ctrl = mhi_alloc_controller();
	if (!mhi_ctrl) {
		cnss_pr_err("Invalid MHI controller context\n");
		return -EINVAL;
	}

	pci_priv->mhi_ctrl = mhi_ctrl;
	mhi_ctrl->cntrl_dev = &pci_dev->dev;

	mhi_ctrl->fw_image = plat_priv->firmware_name;
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
#endif

	mhi_ctrl->regs = pci_priv->bar;
	mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
	bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
	cnss_pr_dbg("BAR starts at %pa, length is %x\n",
		    &bar_start, mhi_ctrl->reg_len);

	ret = cnss_pci_get_mhi_msi(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
		goto free_mhi_ctrl;
	}

	if (cnss_pci_is_one_msi(pci_priv))
		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;

	if (pci_priv->smmu_s1_enable) {
		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
					pci_priv->smmu_iova_len;
	} else {
		mhi_ctrl->iova_start = 0;
		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
	}

	mhi_ctrl->status_cb = cnss_mhi_notify_status;
	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
	mhi_ctrl->read_reg = cnss_mhi_read_reg;
	mhi_ctrl->write_reg = cnss_mhi_write_reg;

	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
	if (!mhi_ctrl->rddm_size)
		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;

	if (plat_priv->device_id == QCN7605_DEVICE_ID)
		mhi_ctrl->sbl_size = SZ_256K;
	else
		mhi_ctrl->sbl_size = SZ_512K;

	mhi_ctrl->seg_len = SZ_512K;
	mhi_ctrl->fbc_download = true;

	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
	if (ret)
		goto free_mhi_irq;

	/* Satellite config is only supported on KIWI V2 and later chipsets */
	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
	    (plat_priv->device_id == KIWI_DEVICE_ID &&
	     plat_priv->device_version.major_version == 1)) {
		if (plat_priv->device_id == QCN7605_DEVICE_ID)
			cnss_mhi_config = &cnss_mhi_config_genoa;
		else
			cnss_mhi_config = &cnss_mhi_config_no_satellite;
	}

	mhi_ctrl->tme_supported_image = cnss_is_tme_supported(pci_priv);

	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
	if (ret) {
		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
		goto free_mhi_irq;
	}

	/* MHI satellite driver only needs to connect when DRV is supported */
	if (cnss_pci_get_drv_supported(pci_priv))
		cnss_mhi_controller_set_base(pci_priv, bar_start);

	cnss_get_bwscal_info(plat_priv);
	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);

	/* BW scale CB needs to be set after registering MHI per requirement */
	if (!plat_priv->no_bwscale)
		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
						    cnss_mhi_bw_scale);

	ret = cnss_pci_update_fw_name(pci_priv);
	if (ret)
		goto unreg_mhi;

	return 0;

unreg_mhi:
	mhi_unregister_controller(mhi_ctrl);
free_mhi_irq:
	kfree(mhi_ctrl->irq);
free_mhi_ctrl:
	mhi_free_controller(mhi_ctrl);

	return ret;
}

static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
{
	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return;

	mhi_unregister_controller(mhi_ctrl);
	kfree(mhi_ctrl->irq);
	mhi_ctrl->irq = NULL;
	mhi_free_controller(mhi_ctrl);
	pci_priv->mhi_ctrl = NULL;
}

static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
		pci_priv->wcss_reg = wcss_reg_access_seq;
		pci_priv->pcie_reg = pcie_reg_access_seq;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		pci_priv->syspm_reg = syspm_reg_access_seq;

		/* Configure WDOG register with specific value so that we can
		 * know if HW is in the process of WDOG reset recovery or not
		 * when reading the registers.
		 */
		cnss_pci_reg_write
		(pci_priv,
		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
		break;
	case QCA6490_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		break;
	default:
		return;
	}
}

#if !IS_ENABLED(CONFIG_ARCH_QCOM)
static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
	return 0;
}

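/**
 * cnss_pci_wake_handler() - Interrupt handler for the WLAN PCI wake GPIO
 * @irq: wake IRQ number
 * @data: driver PCI bus context pointer
 *
 * Abort any system suspend in progress and, if the bus was auto
 * suspended or runtime PM is suspending/suspended, request a runtime
 * resume of the device.
 */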
static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
{
	struct cnss_pci_data *pci_priv = data;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	enum rpm_status status;
	struct device *dev;

	pci_priv->wake_counter++;
	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
		    pci_priv->wake_irq, pci_priv->wake_counter);

	/* Make sure to abort any suspend currently in progress */
	cnss_pm_stay_awake(plat_priv);
	cnss_pm_relax(plat_priv);
	/* The two pm* API calls above abort system suspend only when
	 * plat_dev->dev->ws has been initialized via device_init_wakeup(),
	 * so pm_system_wakeup() is called as well to guarantee that system
	 * suspend can be aborted even if it has not been initialized.
	 */
	pm_system_wakeup();

	dev = &pci_priv->pci_dev->dev;
	status = dev->power.runtime_status;

	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
	     cnss_pci_get_auto_suspended(pci_priv)) ||
	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
		cnss_pci_set_monitor_wake_intr(pci_priv, false);
		cnss_pci_pm_request_resume(pci_priv);
	}

	return IRQ_HANDLED;
}

/**
 * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
 * @pci_priv: driver PCI bus context pointer
 *
 * This function initializes WLAN PCI wake GPIO and corresponding
 * interrupt. It should be used in non-MSM platforms whose PCIe
 * root complex driver doesn't handle the GPIO.
 *
 * Return: 0 for success or skip, negative value for error
 */
static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &plat_priv->plat_dev->dev;
	int ret = 0;

	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
						"wlan-pci-wake-gpio", 0);
	if (pci_priv->wake_gpio < 0)
		goto out;

	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
		    pci_priv->wake_gpio);

	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
	if (ret) {
		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
			    ret);
		goto out;
	}

	gpio_direction_input(pci_priv->wake_gpio);
	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);

	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
	if (ret) {
		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
		goto free_gpio;
	}

	ret = enable_irq_wake(pci_priv->wake_irq);
	if (ret) {
		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
		goto free_irq;
	}

	return 0;

free_irq:
	free_irq(pci_priv->wake_irq, pci_priv);
free_gpio:
	gpio_free(pci_priv->wake_gpio);
out:
	return ret;
}

static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->wake_gpio < 0)
		return;

	disable_irq_wake(pci_priv->wake_irq);
	free_irq(pci_priv->wake_irq, pci_priv);
	gpio_free(pci_priv->wake_gpio);
}
#endif

#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
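/**
 * cnss_try_suspend() - Suspend the PCIe link once enumeration is done
 * @plat_priv: driver platform context pointer
 *
 * In the dual WLAN card configuration, suspend the PCIe link and power
 * off the device only after enumeration has fully completed, to avoid
 * suspending a link that is still being enumerated.
 *
 * Return: 0 on success, negative errno on failure.
 */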
static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
{
	int ret = 0;

	/* In the dual WLAN card case, if pci_register_driver() is called
	 * after the first PCIe device finishes enumeration, cnss_pci_probe()
	 * gets called early for the second WLAN card, giving this sequence:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe
	 * -> exit msm_pcie_enumerate.
	 * The expected sequence is:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices ->
	 * exit msm_pcie_enumerate -> cnss_pci_probe.
	 * The unexpected sequence would make the second WLAN card suspend
	 * the PCIe link while enumeration has not finished, so the logic
	 * below avoids suspending the link in that case.
	 */
	plat_priv->enumerate_done = true;

	/* Now enumeration is finished, try to suspend PCIe link */
	if (plat_priv->bus_priv) {
		struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
		struct pci_dev *pci_dev = pci_priv->pci_dev;

		switch (pci_dev->device) {
		case QCA6390_DEVICE_ID:
			cnss_pci_set_wlaon_pwr_ctrl(pci_priv,
						    false,
						    true,
						    false);
			cnss_pci_suspend_pwroff(pci_dev);
			break;
		default:
			cnss_pr_err("Unknown PCI device found: 0x%x\n",
				    pci_dev->device);
			ret = -ENODEV;
		}
	}

	return ret;
}

#else
static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
{
	return 0;
}
#endif

/* Using this cnss_pm_domain ops lets the PM framework override the ops
 * from dev->bus->pm, which is pci_dev_pm_ops from pci-driver.c. These ops
 * have to take care of everything the device driver needs, which is
 * currently done from pci_dev_pm_ops.
 */
static struct dev_pm_domain cnss_pm_domain = {
	.ops = {
		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
					      cnss_pci_resume_noirq)
		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
				   cnss_pci_runtime_resume,
				   cnss_pci_runtime_idle)
	}
};

static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
{
	struct device_node *child;
	u32 id, i;
	int id_n, ret;

	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
		return 0;

	if (!plat_priv->device_id) {
		cnss_pr_err("Invalid device id\n");
		return -EINVAL;
	}

	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
					 child) {
		if (strcmp(child->name, "chip_cfg"))
			continue;

		id_n = of_property_count_u32_elems(child, "supported-ids");
		if (id_n <= 0) {
			cnss_pr_err("Device id is NOT set\n");
			return -EINVAL;
		}

		for (i = 0; i < id_n; i++) {
			ret = of_property_read_u32_index(child,
							 "supported-ids",
							 i, &id);
			if (ret) {
				cnss_pr_err("Failed to read supported ids\n");
				return -EINVAL;
			}

			if (id == plat_priv->device_id) {
				plat_priv->dev_node = child;
				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
					    child->name, i, id);
				return 0;
			}
		}
	}

	return -EINVAL;
}

#ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
{
	bool suspend_pwroff;

	switch (pci_dev->device) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
		suspend_pwroff = false;
		break;
	default:
		suspend_pwroff = true;
	}

	return suspend_pwroff;
}
#else
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
{
	return true;
}
#endif

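/**
 * cnss_pci_suspend_pwroff() - Suspend the PCIe link and power off the device
 * @pci_dev: PCI device
 *
 * Suspend the PCIe link and power the device off, unless the device is
 * one for which conditional power off keeps it powered.
 */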
static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	int rc_num = pci_dev->bus->domain_nr;
	struct cnss_plat_data *plat_priv;
	int ret = 0;
	bool suspend_pwroff = cnss_should_suspend_pwroff(pci_dev);

	plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);

	if (suspend_pwroff) {
		ret = cnss_suspend_pci_link(pci_priv);
		if (ret)
			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
				    ret);
		cnss_power_off_device(plat_priv);
	} else {
		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
			    pci_dev->device);
	}
}

#ifdef CONFIG_CNSS2_ENUM_WITH_LOW_SPEED
static void
cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	int ret;

	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
					  PCI_EXP_LNKSTA_CLS_2_5GB);
	if (ret)
		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen1, err = %d\n",
			    rc_num, ret);
}

static void
cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* Genoa stays at the downgraded speed; restore RC speed only for
	 * other devices.
	 */
	if (pci_priv->device_id != QCN7605_DEVICE_ID) {
		/* The request 0 will reset maximum GEN speed to default */
		ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num, 0);
		if (ret)
			cnss_pr_err("Failed to reset max PCIe RC%x link speed to default, err = %d\n",
				    plat_priv->rc_num, ret);

		/* suspend/resume will trigger retraining to re-establish link speed */
		ret = cnss_suspend_pci_link(pci_priv);
		if (ret)
			cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

		ret = cnss_resume_pci_link(pci_priv);
		if (ret)
			cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
	}
}

#else
static void
cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
{
}

static void
cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
{
}
#endif

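/**
 * cnss_pci_probe() - PCI probe callback for supported WLAN devices
 * @pci_dev: PCI device
 * @id: matched entry from cnss_pci_id_table
 *
 * Bind the PCI device to its platform context, then bring up the bus:
 * device config node, SMMU, PCI events, bus and MSI enablement, MHI
 * registration and per-device timers. On success the link may be
 * suspended and the device powered off until the WLAN driver attaches.
 *
 * Return: 0 on success, negative errno on failure.
 */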
static int cnss_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	int ret = 0;
	struct cnss_pci_data *pci_priv;
	struct device *dev = &pci_dev->dev;
	int rc_num = pci_dev->bus->domain_nr;
	struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);

	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x rc_num %d\n",
		    id->vendor, pci_dev->device, rc_num);
	if (!plat_priv) {
		cnss_pr_err("Failed to find matching plat_priv for RC number\n");
		ret = -ENODEV;
		goto out;
	}

	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
	if (!pci_priv) {
		ret = -ENOMEM;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;
	pci_priv->plat_priv = plat_priv;
	pci_priv->pci_dev = pci_dev;
	pci_priv->pci_device_id = id;
	pci_priv->device_id = pci_dev->device;
	cnss_set_pci_priv(pci_dev, pci_priv);
	plat_priv->device_id = pci_dev->device;
	plat_priv->bus_priv = pci_priv;
	mutex_init(&pci_priv->bus_lock);
	if (plat_priv->use_pm_domain)
		dev->pm_domain = &cnss_pm_domain;

	cnss_pci_restore_rc_speed(pci_priv);

	ret = cnss_pci_get_dev_cfg_node(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
		goto reset_ctx;
	}

	cnss_get_sleep_clk_supported(plat_priv);

	ret = cnss_dev_specific_power_on(plat_priv);
	if (ret < 0)
		goto reset_ctx;

	cnss_pci_of_reserved_mem_device_init(pci_priv);

	ret = cnss_register_subsys(plat_priv);
	if (ret)
		goto reset_ctx;

	ret = cnss_register_ramdump(plat_priv);
	if (ret)
		goto unregister_subsys;

	ret = cnss_pci_init_smmu(pci_priv);
	if (ret)
		goto unregister_ramdump;

	/* update drv support flag */
	cnss_pci_update_drv_supported(pci_priv);

	cnss_update_supported_link_info(pci_priv);

	ret = cnss_reg_pci_event(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
		goto deinit_smmu;
	}

	ret = cnss_pci_enable_bus(pci_priv);
	if (ret)
		goto dereg_pci_event;

	ret = cnss_pci_enable_msi(pci_priv);
	if (ret)
		goto disable_bus;

	ret = cnss_pci_register_mhi(pci_priv);
	if (ret)
		goto disable_msi;

	switch (pci_dev->device) {
	case QCA6174_DEVICE_ID:
		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
				     &pci_priv->revision_id);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		if ((cnss_is_dual_wlan_enabled() &&
		     plat_priv->enumerate_done) || !cnss_is_dual_wlan_enabled())
			cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false,
						    false);

		timer_setup(&pci_priv->dev_rddm_timer,
			    cnss_dev_rddm_timeout_hdlr, 0);
		timer_setup(&pci_priv->boot_debug_timer,
			    cnss_boot_debug_timeout_hdlr, 0);
		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
				  cnss_pci_time_sync_work_hdlr);
		cnss_pci_get_link_status(pci_priv);
		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
		cnss_pci_wake_gpio_init(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown PCI device found: 0x%x\n",
			    pci_dev->device);
		ret = -ENODEV;
		goto unreg_mhi;
	}

	cnss_pci_config_regs(pci_priv);
	if (EMULATION_HW)
		goto out;
	if (cnss_is_dual_wlan_enabled() && !plat_priv->enumerate_done)
		goto probe_done;
	cnss_pci_suspend_pwroff(pci_dev);

probe_done:
	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);

	return 0;

unreg_mhi:
	cnss_pci_unregister_mhi(pci_priv);
disable_msi:
	cnss_pci_disable_msi(pci_priv);
disable_bus:
	cnss_pci_disable_bus(pci_priv);
dereg_pci_event:
	cnss_dereg_pci_event(pci_priv);
deinit_smmu:
	cnss_pci_deinit_smmu(pci_priv);
unregister_ramdump:
	cnss_unregister_ramdump(plat_priv);
unregister_subsys:
	cnss_unregister_subsys(plat_priv);
reset_ctx:
	plat_priv->bus_priv = NULL;
out:
	return ret;
}

static void cnss_pci_remove(struct pci_dev *pci_dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv =
		cnss_bus_dev_to_plat_priv(&pci_dev->dev);

	if (plat_priv)
		clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
	cnss_pci_unregister_driver_hdlr(pci_priv);
	cnss_pci_free_aux_mem(pci_priv);
	cnss_pci_free_tme_lite_mem(pci_priv);
	cnss_pci_free_m3_mem(pci_priv);
	cnss_pci_free_fw_mem(pci_priv);
	cnss_pci_free_qdss_mem(pci_priv);

	switch (pci_dev->device) {
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		cnss_pci_wake_gpio_deinit(pci_priv);
		del_timer(&pci_priv->boot_debug_timer);
		del_timer(&pci_priv->dev_rddm_timer);
		break;
	default:
		break;
	}

	cnss_pci_unregister_mhi(pci_priv);
	cnss_pci_disable_msi(pci_priv);
	cnss_pci_disable_bus(pci_priv);
	cnss_dereg_pci_event(pci_priv);
	cnss_pci_deinit_smmu(pci_priv);
	if (plat_priv) {
		cnss_unregister_ramdump(plat_priv);
		cnss_unregister_subsys(plat_priv);
		plat_priv->bus_priv = NULL;
	} else {
		cnss_pr_err("plat_priv is NULL, unable to unregister ramdump and subsys\n");
	}
}

static const struct pci_device_id cnss_pci_id_table[] = {
	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCN7605_VENDOR_ID, QCN7605_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ PEACH_VENDOR_ID, PEACH_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);

static const struct dev_pm_ops cnss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
				      cnss_pci_resume_noirq)
	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
			   cnss_pci_runtime_idle)
};

static struct pci_driver cnss_pci_driver = {
	.name = "cnss_pci",
	.id_table = cnss_pci_id_table,
	.probe = cnss_pci_probe,
	.remove = cnss_pci_remove,
	.driver = {
		.pm = &cnss_pm_ops,
	},
};

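/**
 * cnss_pci_enumerate() - Enumerate a PCIe root complex
 * @plat_priv: driver platform context pointer
 * @rc_num: PCIe root complex number to enumerate
 *
 * Cap the initial link speed where needed (Gen2 for QCA6490, Gen1 when
 * low-speed enumeration is enabled), then enumerate the RC, retrying
 * link training up to LINK_TRAINING_RETRY_MAX_TIMES on failure.
 *
 * Return: 0 on success, negative errno on failure.
 */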
static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	int ret, retry = 0;

	/* Always set initial target PCIe link speed to Gen2 for QCA6490 device
	 * since there may be link issues if it boots up with Gen3 link speed.
	 * Device is able to change it later at any time. It will be rejected
	 * if requested speed is higher than the one specified in PCIe DT.
	 */
	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
		ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
						  PCI_EXP_LNKSTA_CLS_5_0GB);
		if (ret && ret != -EPROBE_DEFER)
			cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
				    rc_num, ret);
	} else {
		cnss_pci_downgrade_rc_speed(plat_priv, rc_num);
	}

	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);

retry:
	ret = _cnss_pci_enumerate(plat_priv, rc_num);
	if (ret) {
		if (ret == -EPROBE_DEFER) {
			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
			goto out;
		}
		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
			    rc_num, ret);
		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
			goto retry;
		} else {
			goto out;
		}
	}

	plat_priv->rc_num = rc_num;

out:
	return ret;
}

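/**
 * cnss_pci_init() - Enumerate PCIe RCs and register the PCI driver
 * @plat_priv: driver platform context pointer
 *
 * Read the "qcom,wlan-rc-num" DT property, enumerate the listed root
 * complexes until one succeeds, optionally suspend the link, and
 * register cnss_pci_driver with the PCI framework once.
 *
 * Return: 0 on success, negative errno on failure.
 */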
int cnss_pci_init(struct cnss_plat_data *plat_priv)
{
	struct device *dev = &plat_priv->plat_dev->dev;
	const __be32 *prop;
	int ret = 0, prop_len = 0, rc_count, i;

	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
	if (!prop || !prop_len) {
		cnss_pr_err("Failed to get PCIe RC number from DT\n");
		goto out;
	}

	rc_count = prop_len / sizeof(__be32);
	for (i = 0; i < rc_count; i++) {
		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
		if (!ret)
			break;
		else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
			goto out;
	}

	ret = cnss_try_suspend(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to suspend, ret: %d\n", ret);
		goto out;
	}

	if (!cnss_driver_registered) {
		ret = pci_register_driver(&cnss_pci_driver);
		if (ret) {
			cnss_pr_err("Failed to register to PCI framework, err = %d\n",
				    ret);
			goto out;
		}

		if (!plat_priv->bus_priv) {
			cnss_pr_err("Failed to probe PCI driver\n");
			ret = -ENODEV;
			goto unreg_pci;
		}

		cnss_driver_registered = true;
	}

	return 0;

unreg_pci:
	pci_unregister_driver(&cnss_pci_driver);
out:
	return ret;
}

void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
{
	if (cnss_driver_registered) {
		pci_unregister_driver(&cnss_pci_driver);
		cnss_driver_registered = false;
	}
}
  6307. }