// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msm_gsi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include "gsi.h"
#include "gsi_emulation.h"
#include "gsihal.h"
#include <asm/arch_timer.h>
#include <linux/sched/clock.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/version.h>
#include <soc/qcom/minidump.h>
#define CREATE_TRACE_POINTS
#include "gsi_trace.h"

#define GSI_CMD_TIMEOUT (5*HZ)
#define GSI_FC_CMD_TIMEOUT (2*GSI_CMD_TIMEOUT)
#define GSI_START_CMD_TIMEOUT_MS 1000
#define GSI_CMD_POLL_CNT 5
#define GSI_STOP_CMD_TIMEOUT_MS 200
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_IRQ_STORM_THR 5
#define GSI_FC_MAX_TIMEOUT 5
#define GSI_STOP_CMD_POLL_CNT 4
#define GSI_STOP_IN_PROC_CMD_POLL_CNT 2
#define GSI_RESET_WA_MIN_SLEEP 1000
#define GSI_RESET_WA_MAX_SLEEP 2000
#define GSI_CHNL_STATE_MAX_RETRYCNT 10
#define GSI_STTS_REG_BITS 32
#define GSI_MSB_MASK 0xFFFFFFFF00000000ULL
#define GSI_LSB_MASK 0x00000000FFFFFFFFULL
#define GSI_MSB(num) ((u32)((num & GSI_MSB_MASK) >> 32))
#define GSI_LSB(num) ((u32)(num & GSI_LSB_MASK))
#define GSI_FC_NUM_WORDS_PER_CHNL_SHRAM (20)
#define GSI_FC_STATE_INDEX_SHRAM (7)
#define GSI_FC_PENDING_MASK (0x00080000)
#define GSI_NTN3_PENDING_DB_AFTER_RB_MASK 18
#define GSI_NTN3_PENDING_DB_AFTER_RB_SHIFT 1

/* FOR_SEQ_HIGH channel scratch: (((8 * (pipe_id * ctx_size + offset_lines)) + 4) / 4) */
#define GSI_GSI_SHRAM_n_EP_FOR_SEQ_HIGH_N_GET(ep_id) (((8 * (ep_id * 10 + 9)) + 4) / 4)
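
/*
 * Example: per the formula above, ep_id = 0 maps to SHRAM word
 * ((8 * 9) + 4) / 4 = 19 and ep_id = 1 to ((8 * 19) + 4) / 4 = 39,
 * i.e. consecutive endpoints are 20 words apart, matching
 * GSI_FC_NUM_WORDS_PER_CHNL_SHRAM.
 */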

#ifndef CONFIG_DEBUG_FS
void gsi_debugfs_init(void)
{
}
#endif

static const struct of_device_id msm_gsi_match[] = {
	{ .compatible = "qcom,msm_gsi", },
	{ },
};

#if defined(CONFIG_IPA_EMULATION)
static bool running_emulation = true;
#else
static bool running_emulation;
#endif

struct gsi_ctx *gsi_ctx;

static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr);
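
/*
 * The __gsi_config_*_irq() helpers below share one read-modify-write
 * pattern: read the current mask/enable register, clear the bits
 * selected by mask, then set those bits from val, leaving all other
 * bits untouched: new = (curr & ~mask) | (val & mask). Passing
 * val == mask enables the selected interrupts; val == 0 disables them.
 */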
static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ_MSK, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ_MSK, ee,
		(curr & ~mask) | (val & mask));
}

static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK, ee,
		(curr & ~mask) | (val & mask));
}

static void __gsi_config_all_ch_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr, k, max_k;

	max_k = gsihal_get_bit_map_array_size();
	for (k = 0; k < max_k; k++) {
		curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_k, ee, k);
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_k, ee, k,
			(curr & ~mask) | (val & mask));
	}
}

static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK, ee,
		(curr & ~mask) | (val & mask));
}

static void __gsi_config_all_evt_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr, k, max_k;

	max_k = gsihal_get_bit_map_array_size();
	for (k = 0; k < max_k; k++) {
		curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_k, ee, k);
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_k, ee, k,
			(curr & ~mask) | (val & mask));
	}
}

static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee,
		(curr & ~mask) | (val & mask));
	GSIDBG("current IEO_IRQ_MSK: 0x%x, change to: 0x%x\n",
		curr, ((curr & ~mask) | (val & mask)));
}

static void __gsi_config_all_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr, k, max_k;

	max_k = gsihal_get_bit_map_array_size();
	for (k = 0; k < max_k; k++) {
		curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k,
			(curr & ~mask) | (val & mask));
		GSIDBG("current IEO_IRQ_MSK: 0x%x, change to: 0x%x\n",
			curr, ((curr & ~mask) | (val & mask)));
	}
}

static void __gsi_config_ieob_irq_k(int ee, uint32_t k, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
	gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k,
		(curr & ~mask) | (val & mask));
	GSIDBG("current IEO_IRQ_MSK: 0x%x, change to: 0x%x\n",
		curr, ((curr & ~mask) | (val & mask)));
}

static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_EN, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_EN, ee,
		(curr & ~mask) | (val & mask));
}

static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_EN, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_EN, ee,
		(curr & ~mask) | (val & mask));
}
static void gsi_channel_state_change_wait(unsigned long chan_hdl,
	struct gsi_chan_ctx *ctx,
	uint32_t tm, enum gsi_ch_cmd_opcode op)
{
	int poll_cnt;
	int gsi_pending_intr;
	int res;
	struct gsihal_reg_ctx_type_irq type;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	int ee = gsi_ctx->per.ee;
	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
	int stop_in_proc_retry = 0;
	int stop_retry = 0;

	/*
	 * Poll the GSI channel for a total duration of
	 * tm * GSI_CMD_POLL_CNT. Polling the GSI state improves
	 * debuggability of the GSI HW state.
	 */
	for (poll_cnt = 0;
		poll_cnt < GSI_CMD_POLL_CNT;
		poll_cnt++) {
		res = wait_for_completion_timeout(&ctx->compl,
			msecs_to_jiffies(tm));

		/* Interrupt received, return */
		if (res != 0)
			return;

		gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_TYPE_IRQ, ee, &type);
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			gsi_pending_intr = gsihal_read_reg_nk(
				GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_k,
				ee, gsihal_get_ch_reg_idx(chan_hdl));
		} else {
			gsi_pending_intr = gsihal_read_reg_n(
				GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ, ee);
		}

		if (gsi_ctx->per.ver == GSI_VER_1_0) {
			gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
				ee, chan_hdl, &ch_k_cntxt_0);
			curr_state = ch_k_cntxt_0.chstate;
		}

		/*
		 * Update the channel state only if the interrupt was raised
		 * for this particular channel and the global channel-control
		 * interrupt is also set.
		 */
		if ((type.ch_ctrl) &&
			(gsi_pending_intr & gsihal_get_ch_reg_mask(chan_hdl))) {
			/*
			 * Check channel state here in case the channel is
			 * already started but interrupt is not yet received.
			 */
			gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
				ee, chan_hdl, &ch_k_cntxt_0);
			curr_state = ch_k_cntxt_0.chstate;
		}

		if (op == GSI_CH_START) {
			if (curr_state == GSI_CHAN_STATE_STARTED ||
				curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
				ctx->state = curr_state;
				return;
			}
		}

		if (op == GSI_CH_STOP) {
			if (curr_state == GSI_CHAN_STATE_STOPPED)
				stop_retry++;
			else if (curr_state == GSI_CHAN_STATE_STOP_IN_PROC)
				stop_in_proc_retry++;
		}

		/*
		 * If a stop was first observed on this iteration, reset the
		 * poll counter so the loop keeps running until the stop /
		 * stop-in-proc retry limits are reached.
		 */
		if (stop_retry == 1 || stop_in_proc_retry == 1)
			poll_cnt = 0;

		/*
		 * If the stop retry count reached its maximum, clear the
		 * pending interrupt; the channel has already stopped.
		 */
		if (stop_retry == GSI_STOP_CMD_POLL_CNT) {
			if (gsi_ctx->per.ver >= GSI_VER_3_0) {
				gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_k,
					ee, gsihal_get_ch_reg_idx(chan_hdl),
					gsi_pending_intr);
			} else {
				gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR,
					ee,
					gsi_pending_intr);
			}
			ctx->state = curr_state;
			return;
		}

		/*
		 * If the channel is stuck in STOP_IN_PROC, there is no need
		 * to keep waiting for the full timeout.
		 */
		if (stop_in_proc_retry == GSI_STOP_IN_PROC_CMD_POLL_CNT) {
			ctx->state = curr_state;
			return;
		}

		GSIDBG("GSI wait on chan_hdl=%lu irqtyp=%u state=%u intr=%u\n",
			chan_hdl,
			type.ch_ctrl,
			ctx->state,
			gsi_pending_intr);
	}

	GSIDBG("invalidating the channel state when timeout happens\n");
	ctx->state = curr_state;
}

static void gsi_handle_ch_ctrl(int ee)
{
	uint32_t ch;
	int i, k, max_k;
	uint32_t ch_hdl;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	struct gsi_chan_ctx *ctx;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_k, ee, k);
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_k, ee, k, ch);
			GSIDBG("ch %x\n", ch);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch) {
					ch_hdl = i + (GSI_STTS_REG_BITS * k);
					if (ch_hdl >= gsi_ctx->max_ch ||
					    ch_hdl >= GSI_CHAN_MAX) {
						GSIERR("invalid channel %d\n",
							ch_hdl);
						break;
					}
					ctx = &gsi_ctx->chan[ch_hdl];
					gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
						ee, ch_hdl, &ch_k_cntxt_0);
					ctx->state = ch_k_cntxt_0.chstate;
					GSIDBG("ch %u state updated to %u\n",
						ch_hdl, ctx->state);
					complete(&ctx->compl);
					gsi_ctx->ch_dbg[ch_hdl].cmd_completed++;
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ, ee);
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR, ee, ch);
		GSIDBG("ch %x\n", ch);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch) {
				if (i >= gsi_ctx->max_ch ||
				    i >= GSI_CHAN_MAX) {
					GSIERR("invalid channel %d\n", i);
					break;
				}
				ctx = &gsi_ctx->chan[i];
				gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
					ee, i, &ch_k_cntxt_0);
				ctx->state = ch_k_cntxt_0.chstate;
				GSIDBG("ch %u state updated to %u\n", i,
					ctx->state);
				complete(&ctx->compl);
				gsi_ctx->ch_dbg[i].cmd_completed++;
			}
		}
	}
}

static void gsi_handle_ev_ctrl(int ee)
{
	uint32_t ch;
	int i, k;
	uint32_t evt_hdl, max_k;
	struct gsi_evt_ctx *ctx;
	struct gsihal_reg_ev_ch_k_cntxt_0 ev_ch_k_cntxt_0;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_k, ee, k);
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_k, ee, k, ch);
			GSIDBG("ev %x\n", ch);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch) {
					evt_hdl = i + (GSI_STTS_REG_BITS * k);
					if (evt_hdl >= gsi_ctx->max_ev ||
					    evt_hdl >= GSI_EVT_RING_MAX) {
						GSIERR("invalid event %d\n",
							evt_hdl);
						break;
					}
					ctx = &gsi_ctx->evtr[evt_hdl];
					gsihal_read_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
						ee, evt_hdl, &ev_ch_k_cntxt_0);
					ctx->state = ev_ch_k_cntxt_0.chstate;
					GSIDBG("evt %u state updated to %u\n",
						evt_hdl, ctx->state);
					complete(&ctx->compl);
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ, ee);
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR, ee, ch);
		GSIDBG("ev %x\n", ch);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch) {
				if (i >= gsi_ctx->max_ev ||
				    i >= GSI_EVT_RING_MAX) {
					GSIERR("invalid event %d\n", i);
					break;
				}
				ctx = &gsi_ctx->evtr[i];
				gsihal_read_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
					ee, i, &ev_ch_k_cntxt_0);
				ctx->state = ev_ch_k_cntxt_0.chstate;
				GSIDBG("evt %u state updated to %u\n", i,
					ctx->state);
				complete(&ctx->compl);
			}
		}
	}
}
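
/*
 * Note: the raw 32-bit ERROR_LOG value is reinterpreted through the
 * struct gsi_log_err bitfields (err_type, ee, virt_idx, code, args),
 * which is why gsi_handle_glob_err() takes err by value and casts its
 * address below instead of decoding individual register fields.
 */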
static void gsi_handle_glob_err(uint32_t err)
{
	struct gsi_log_err *log;
	struct gsi_chan_ctx *ch;
	struct gsi_evt_ctx *ev;
	struct gsi_chan_err_notify chan_notify;
	struct gsi_evt_err_notify evt_notify;
	struct gsi_per_notify per_notify;
	enum gsi_err_type err_type;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;

	log = (struct gsi_log_err *)&err;
	GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
		log->virt_idx);
	GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
		log->arg2, log->arg3);

	err_type = log->err_type;
	/*
	 * These are errors thrown by hardware. We need
	 * BUG_ON() to capture the hardware state right
	 * when it is unexpected.
	 */
	switch (err_type) {
	case GSI_ERR_TYPE_GLOB:
		per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
		per_notify.user_data = gsi_ctx->per.user_data;
		per_notify.data.err_desc = err & 0xFFFF;
		gsi_ctx->per.notify_cb(&per_notify);
		break;
	case GSI_ERR_TYPE_CHAN:
		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) {
			GSIERR("Unexpected ch %d\n", log->virt_idx);
			return;
		}
		ch = &gsi_ctx->chan[log->virt_idx];
		chan_notify.chan_user_data = ch->props.chan_user_data;
		chan_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_INVALID_TRE_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
				gsi_ctx->per.ee, log->virt_idx, &ch_k_cntxt_0);
			ch->state = ch_k_cntxt_0.chstate;
			GSIDBG("ch %u state updated to %u\n", log->virt_idx,
				ch->state);
			ch->stats.invalid_tre_error++;
			if (ch->state == GSI_CHAN_STATE_ERROR) {
				GSIERR("Unexpected channel state %d\n",
					ch->state);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
		} else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
			complete(&ch->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			chan_notify.evt_id =
				GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id =
				GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
		} else if (log->code == GSI_HWO_1_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
		} else {
			GSIERR("unexpected event log code %d\n", log->code);
			GSI_ASSERT();
		}
		ch->props.err_cb(&chan_notify);
		break;
	case GSI_ERR_TYPE_EVT:
		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) {
			GSIERR("Unexpected ev %d\n", log->virt_idx);
			return;
		}
		ev = &gsi_ctx->evtr[log->virt_idx];
		evt_notify.user_data = ev->props.user_data;
		evt_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
			complete(&ev->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
		} else {
			GSIERR("unexpected event log code %d\n", log->code);
			GSI_ASSERT();
		}
		ev->props.err_cb(&evt_notify);
		break;
	}
}

static void gsi_handle_gp_int1(void)
{
	complete(&gsi_ctx->gen_ee_cmd_compl);
}

static void gsi_handle_glob_ee(int ee)
{
	uint32_t val;
	uint32_t err;
	struct gsi_per_notify notify;
	uint32_t clr = ~0;
	struct gsihal_reg_cntxt_glob_irq_stts cntxt_glob_irq_stts;

	val = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_GLOB_IRQ_STTS,
		ee, &cntxt_glob_irq_stts);
	notify.user_data = gsi_ctx->per.user_data;

	if (cntxt_glob_irq_stts.error_int) {
		err = gsihal_read_reg_n(GSI_EE_n_ERROR_LOG, ee);
		if (gsi_ctx->per.ver >= GSI_VER_1_2)
			gsihal_write_reg_n(GSI_EE_n_ERROR_LOG, ee, 0);
		gsihal_write_reg_n(GSI_EE_n_ERROR_LOG_CLR, ee, clr);
		gsi_handle_glob_err(err);
	}

	if (cntxt_glob_irq_stts.gp_int1)
		gsi_handle_gp_int1();

	if (cntxt_glob_irq_stts.gp_int2) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP2;
		gsi_ctx->per.notify_cb(&notify);
	}

	if (cntxt_glob_irq_stts.gp_int3) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP3;
		gsi_ctx->per.notify_cb(&notify);
	}

	gsihal_write_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_CLR, ee, val);
}

static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
{
	ctx->wp_local += ctx->elem_sz;
	if (ctx->wp_local == ctx->end)
		ctx->wp_local = ctx->base;
}

static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
{
	ctx->rp_local += ctx->elem_sz;
	if (ctx->rp_local == ctx->end)
		ctx->rp_local = ctx->base;
}

uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
{
	WARN_ON(addr < ctx->base || addr >= ctx->end);
	return (uint32_t)(addr - ctx->base) / ctx->elem_sz;
}
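
/*
 * Reading aid for gsi_get_complete_num(): it returns the number of
 * ring elements between two ring addresses, accounting for wrap.
 * addr_diff is computed in unsigned 32-bit arithmetic, so when
 * addr1 > addr2 the difference is the negative distance modulo 2^32
 * and adding ctx->len folds it back into the ring. Example: with
 * len = 64 and elem_sz = 16, addr1 at offset 48 and addr2 at offset
 * 16 yield (16 - 48) + 64 = 32 bytes, i.e. 2 elements.
 */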
static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
	uint64_t addr2)
{
	uint32_t addr_diff;

	GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
		ctx->base, ctx->end);

	if (addr1 < ctx->base || addr1 >= ctx->end) {
		GSIERR("address = 0x%llx not in range\n", addr1);
		GSI_ASSERT();
	}

	if (addr2 < ctx->base || addr2 >= ctx->end) {
		GSIERR("address = 0x%llx not in range\n", addr2);
		GSI_ASSERT();
	}

	addr_diff = (uint32_t)(addr2 - addr1);
	if (addr1 < addr2)
		return addr_diff / ctx->elem_sz;
	else
		return (addr_diff + ctx->len) / ctx->elem_sz;
}
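
/*
 * Note on the two completion flavors handled below: plain GPI
 * completions carry the transfer-ring read pointer in evt->xfer_ptr,
 * from which the element index is derived, while GCI (coalesced)
 * completions supply the index directly in evt->cookie along with a
 * virtual endpoint id (evt->veid).
 */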
static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
	struct gsi_chan_xfer_notify *notify, bool callback)
{
	uint32_t ch_id;
	struct gsi_chan_ctx *ch_ctx;
	uint16_t rp_idx;
	uint64_t rp;

	ch_id = evt->chid;
	if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
		GSIERR("Unexpected ch %d\n", ch_id);
		return;
	}

	ch_ctx = &gsi_ctx->chan[ch_id];
	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
		    ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
		return;

	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
		rp = evt->xfer_ptr;
		if (ch_ctx->ring.rp_local != rp) {
			ch_ctx->stats.completed +=
				gsi_get_complete_num(&ch_ctx->ring,
					ch_ctx->ring.rp_local, rp);
			ch_ctx->ring.rp_local = rp;
		}

		/*
		 * Increment RP local only in polling context to avoid
		 * sys len mismatch.
		 */
		if (!callback || (ch_ctx->props.dir == GSI_CHAN_DIR_TO_GSI &&
		    !ch_ctx->props.tx_poll))
			/* the element at RP is also processed */
			gsi_incr_ring_rp(&ch_ctx->ring);

		ch_ctx->ring.rp = ch_ctx->ring.rp_local;
		rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
		notify->veid = GSI_VEID_DEFAULT;
	} else {
		rp_idx = evt->cookie;
		notify->veid = evt->veid;
	}

	WARN_ON(!ch_ctx->user_data[rp_idx].valid);
	notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;

	/*
	 * During suspend, just before the channel is stopped, an IEOB
	 * interrupt may arrive whose transfer pointer is not processed
	 * because the channel is moving to poll mode. After resume, once
	 * the channel is restarted, the next IEOB interrupt would then
	 * overwrite that transfer pointer. To avoid this, process all
	 * data in the polling context.
	 */
	if (!callback || (ch_ctx->props.dir == GSI_CHAN_DIR_TO_GSI &&
	    !ch_ctx->props.tx_poll)) {
		ch_ctx->stats.completed++;
		ch_ctx->user_data[rp_idx].valid = false;
	}

	notify->chan_user_data = ch_ctx->props.chan_user_data;
	notify->evt_id = evt->code;
	notify->bytes_xfered = evt->len;

	if (callback) {
		if (atomic_read(&ch_ctx->poll_mode)) {
			GSIERR("Calling client callback in polling mode\n");
			WARN_ON(1);
		}
		ch_ctx->props.xfer_cb(notify);
	}
}

static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
	struct gsi_chan_xfer_notify *notify, bool callback)
{
	struct gsi_xfer_compl_evt *evt;
	struct gsi_chan_ctx *ch_ctx;

	evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
		ctx->ring.rp_local - ctx->ring.base);
	gsi_process_chan(evt, notify, callback);

	/*
	 * Increment RP local only in polling context to avoid
	 * sys len mismatch.
	 */
	ch_ctx = &gsi_ctx->chan[evt->chid];
	if (callback && (ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI ||
	    ch_ctx->props.tx_poll))
		return;
	gsi_incr_ring_rp(&ctx->ring);

	/* recycle this element */
	gsi_incr_ring_wp(&ctx->ring);
	ctx->stats.completed++;
}

static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
	uint32_t val;

	ctx->ring.wp = ctx->ring.wp_local;
	val = GSI_LSB(ctx->ring.wp_local);
	gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_DOORBELL_0,
		gsi_ctx->per.ee, ctx->id, val);
}

void gsi_ring_evt_doorbell_polling_mode(unsigned long chan_hdl)
{
	struct gsi_evt_ctx *ctx;

	ctx = gsi_ctx->chan[chan_hdl].evtr;
	gsi_ring_evt_doorbell(ctx);
}
EXPORT_SYMBOL(gsi_ring_evt_doorbell_polling_mode);

static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
{
	uint32_t val;

	/*
	 * Allocate new events for this channel first
	 * before submitting the new TREs.
	 * For TO_GSI channels the event ring doorbell is rung as part of
	 * interrupt handling.
	 */
	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		gsi_ring_evt_doorbell(ctx->evtr);
	ctx->ring.wp = ctx->ring.wp_local;
	val = GSI_LSB(ctx->ring.wp_local);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_0,
		gsi_ctx->per.ee, ctx->props.ch_id, val);
}

static bool check_channel_polling(struct gsi_evt_ctx *ctx)
{
	/* For shared event rings both channels will be marked */
	return atomic_read(&ctx->chan[0]->poll_mode);
}
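
/*
 * Shape of the IEOB handler below: for each event ring whose IEOB bit
 * is set and unmasked (and which is not MSI-driven), the ring read
 * pointer is snapshotted and events are drained until rp_local
 * catches up. The check_again labels re-read rp whenever work was
 * done, closing the race where hardware advances rp after the
 * snapshot; a switch to poll mode aborts the drain instead.
 */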
static void gsi_handle_ieob(int ee)
{
	uint32_t ch, evt_hdl;
	int i, k, max_k;
	uint64_t rp;
	struct gsi_evt_ctx *ctx;
	struct gsi_chan_xfer_notify notify;
	unsigned long flags;
	unsigned long cntr;
	uint32_t msk;
	bool empty;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_k, ee, k);
			msk = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee, k, ch & msk);

			if (trace_gsi_qtimer_enabled()) {
				uint64_t qtimer = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
				qtimer = arch_timer_read_cntpct_el0();
#endif
				trace_gsi_qtimer(qtimer, false, 0, ch, msk);
			}

			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch & msk) {
					evt_hdl = i + (GSI_STTS_REG_BITS * k);
					if (evt_hdl >= gsi_ctx->max_ev ||
					    evt_hdl >= GSI_EVT_RING_MAX) {
						GSIERR("invalid event %d\n",
							evt_hdl);
						break;
					}
					ctx = &gsi_ctx->evtr[evt_hdl];

					/*
					 * Don't handle MSI interrupts, only handle IEOB
					 * IRQs
					 */
					if (ctx->props.intr == GSI_INTR_MSI)
						continue;

					if (ctx->props.intf !=
					    GSI_EVT_CHTYPE_GPI_EV) {
						GSIERR("Unexpected irq intf %d\n",
							ctx->props.intf);
						GSI_ASSERT();
					}

					spin_lock_irqsave(&ctx->ring.slock,
						flags);
check_again_v3_0:
					cntr = 0;
					empty = true;
					rp = ctx->props.gsi_read_event_ring_rp(
						&ctx->props, ctx->id, ee);
					rp |= ctx->ring.rp & GSI_MSB_MASK;

					ctx->ring.rp = rp;
					while (ctx->ring.rp_local != rp) {
						++cntr;
						if (check_channel_polling(ctx)) {
							cntr = 0;
							break;
						}
						gsi_process_evt_re(ctx, &notify,
							true);
						empty = false;
					}
					if (!empty)
						gsi_ring_evt_doorbell(ctx);
					if (cntr != 0)
						goto check_again_v3_0;
					spin_unlock_irqrestore(&ctx->ring.slock,
						flags);
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ, ee);
		msk = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee);
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR, ee, ch & msk);

		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch & msk) {
				if (i >= gsi_ctx->max_ev ||
				    i >= GSI_EVT_RING_MAX) {
					GSIERR("invalid event %d\n", i);
					break;
				}
				ctx = &gsi_ctx->evtr[i];

				/*
				 * Don't handle MSI interrupts, only handle IEOB
				 * IRQs
				 */
				if (ctx->props.intr == GSI_INTR_MSI)
					continue;

				if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
					GSIERR("Unexpected irq intf %d\n",
						ctx->props.intf);
					GSI_ASSERT();
				}

				spin_lock_irqsave(&ctx->ring.slock, flags);
check_again:
				cntr = 0;
				empty = true;
				rp = ctx->props.gsi_read_event_ring_rp(
					&ctx->props, ctx->id, ee);
				rp |= ctx->ring.rp & GSI_MSB_MASK;

				ctx->ring.rp = rp;
				while (ctx->ring.rp_local != rp) {
					++cntr;
					if (check_channel_polling(ctx)) {
						cntr = 0;
						break;
					}
					gsi_process_evt_re(ctx, &notify, true);
					empty = false;
				}
				if (!empty)
					gsi_ring_evt_doorbell(ctx);
				if (cntr != 0)
					goto check_again;
				spin_unlock_irqrestore(&ctx->ring.slock, flags);
			}
		}
	}
}

static void gsi_handle_inter_ee_ch_ctrl(int ee)
{
	uint32_t ch, ch_hdl;
	int i, k, max_k;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_INTER_EE_n_SRC_GSI_CH_IRQ_k, ee, k);
			gsihal_write_reg_nk(GSI_INTER_EE_n_SRC_GSI_CH_IRQ_k, ee, k, ch);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch) {
					ch_hdl = i + (GSI_STTS_REG_BITS * k);
					/* not currently expected */
					GSIERR("ch %u was inter-EE changed\n", ch_hdl);
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_INTER_EE_n_SRC_GSI_CH_IRQ, ee);
		gsihal_write_reg_n(GSI_INTER_EE_n_SRC_GSI_CH_IRQ, ee, ch);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch) {
				/* not currently expected */
				GSIERR("ch %u was inter-EE changed\n", i);
			}
		}
	}
}

static void gsi_handle_inter_ee_ev_ctrl(int ee)
{
	uint32_t ch, evt_hdl;
	int i, k, max_k;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_INTER_EE_n_SRC_EV_CH_IRQ_k, ee, k);
			gsihal_write_reg_nk(GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_k, ee, k, ch);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch) {
					evt_hdl = i + (GSI_STTS_REG_BITS * k);
					/* not currently expected */
					GSIERR("evt %u was inter-EE changed\n",
						evt_hdl);
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_INTER_EE_n_SRC_EV_CH_IRQ, ee);
		gsihal_write_reg_n(GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR, ee, ch);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch) {
				/* not currently expected */
				GSIERR("evt %u was inter-EE changed\n", i);
			}
		}
	}
}

static void gsi_handle_general(int ee)
{
	uint32_t val;
	struct gsi_per_notify notify;
	struct gsihal_reg_cntxt_gsi_irq_stts gsi_irq_stts;

	val = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_GSI_IRQ_STTS,
		ee, &gsi_irq_stts);
	notify.user_data = gsi_ctx->per.user_data;

	if (gsi_irq_stts.gsi_mcs_stack_ovrflow)
		notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;

	if (gsi_irq_stts.gsi_cmd_fifo_ovrflow)
		notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;

	if (gsi_irq_stts.gsi_bus_error)
		notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;

	if (gsi_irq_stts.gsi_break_point)
		notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;

	if (gsi_ctx->per.notify_cb)
		gsi_ctx->per.notify_cb(&notify);

	gsihal_write_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_CLR, ee, val);
}
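
/*
 * Note: the loop below also feeds gsi_isr_cache, a small circular
 * buffer of (sched_clock, qtimer, interrupt type) samples kept for
 * post-mortem IRQ debugging. Channel- and event-control interrupts
 * break out of the loop right after being handled, presumably so the
 * completion they signal is consumed before further causes are polled.
 */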
static void gsi_handle_irq(void)
{
	uint32_t type;
	int ee = gsi_ctx->per.ee;
	int index;
	struct gsihal_reg_ctx_type_irq ctx_type_irq;

	while (1) {
		if (!gsi_ctx->per.clk_status_cb())
			break;
		type = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_TYPE_IRQ,
			ee, &ctx_type_irq);

		if (!type)
			break;

		GSIDBG_LOW("type 0x%x\n", type);
		index = gsi_ctx->gsi_isr_cache_index;
		gsi_ctx->gsi_isr_cache[index].timestamp =
			sched_clock();
		gsi_ctx->gsi_isr_cache[index].qtimer =
			__arch_counter_get_cntvct();
		gsi_ctx->gsi_isr_cache[index].interrupt_type = type;
		gsi_ctx->gsi_isr_cache_index++;
		if (gsi_ctx->gsi_isr_cache_index == GSI_ISR_CACHE_MAX)
			gsi_ctx->gsi_isr_cache_index = 0;

		if (ctx_type_irq.ch_ctrl) {
			gsi_handle_ch_ctrl(ee);
			break;
		}

		if (ctx_type_irq.ev_ctrl) {
			gsi_handle_ev_ctrl(ee);
			break;
		}

		if (ctx_type_irq.glob_ee)
			gsi_handle_glob_ee(ee);

		if (ctx_type_irq.ieob)
			gsi_handle_ieob(ee);

		if (ctx_type_irq.inter_ee_ch_ctrl)
			gsi_handle_inter_ee_ch_ctrl(ee);

		if (ctx_type_irq.inter_ee_ev_ctrl)
			gsi_handle_inter_ee_ev_ctrl(ee);

		if (ctx_type_irq.general)
			gsi_handle_general(ee);
	}
}
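
/*
 * Clock handling in the ISR below: when a request-clock callback is
 * registered, the IRQ is serviced only if the clock grant succeeds.
 * Otherwise, IRQs that arrive while clocks are off are counted, and
 * after GSI_IRQ_STORM_THR consecutive unclocked IRQs
 * enable_clk_bug_on() is invoked to capture the storm; any serviced
 * IRQ with clocks on resets the counter.
 */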
static irqreturn_t gsi_isr(int irq, void *ctxt)
{
	if (gsi_ctx->per.req_clk_cb) {
		bool granted = false;

		gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
		if (granted) {
			gsi_handle_irq();
			gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
		}
	} else if (!gsi_ctx->per.clk_status_cb()) {
		/* we only want to capture the gsi isr storm here */
		if (atomic_read(&gsi_ctx->num_unclock_irq) ==
		    GSI_IRQ_STORM_THR)
			gsi_ctx->per.enable_clk_bug_on();
		atomic_inc(&gsi_ctx->num_unclock_irq);
		return IRQ_HANDLED;
	} else {
		atomic_set(&gsi_ctx->num_unclock_irq, 0);
		gsi_handle_irq();
	}
	return IRQ_HANDLED;
}
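
/*
 * MSI path note: the handler maps the firing irq back to its MSI slot
 * via gsi_ctx->msi.irq[] and from there to the event ring recorded in
 * gsi_ctx->msi.evt[]. If no slot matches, msi equals gsi_ctx->msi.num
 * and the evt[] lookup would read out of bounds, so the code relies on
 * only registered MSI vectors ever reaching this ISR.
 */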
static irqreturn_t gsi_msi_isr(int irq, void *ctxt)
{
	int ee = gsi_ctx->per.ee;
	uint64_t rp;
	struct gsi_chan_xfer_notify notify;
	unsigned long flags;
	unsigned long cntr;
	bool empty;
	uint8_t evt;
	unsigned long msi;
	struct gsi_evt_ctx *evt_ctxt;

	/* Determine which event channel to handle */
	for (msi = 0; msi < gsi_ctx->msi.num; msi++) {
		if (gsi_ctx->msi.irq[msi] == irq)
			break;
	}

	evt = gsi_ctx->msi.evt[msi];
	evt_ctxt = &gsi_ctx->evtr[evt];

	if (trace_gsi_qtimer_enabled()) {
		uint64_t qtimer = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
		qtimer = arch_timer_read_cntpct_el0();
#endif
		trace_gsi_qtimer(qtimer, true, evt, 0, 0);
	}

	if (evt_ctxt->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
		GSIERR("Unexpected irq intf %d\n",
			evt_ctxt->props.intf);
		GSI_ASSERT();
	}

	/* Clear any IEOB IRQs generated for the MSI channel */
	gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee,
		gsihal_get_ch_reg_idx(evt_ctxt->id),
		gsihal_get_ch_reg_mask(evt_ctxt->id));

	spin_lock_irqsave(&evt_ctxt->ring.slock, flags);
check_again:
	cntr = 0;
	empty = true;
	rp = evt_ctxt->props.gsi_read_event_ring_rp(&evt_ctxt->props,
		evt_ctxt->id, ee);
	rp |= evt_ctxt->ring.rp & GSI_MSB_MASK;

	evt_ctxt->ring.rp = rp;
	while (evt_ctxt->ring.rp_local != rp) {
		++cntr;
		if (evt_ctxt->props.exclusive &&
		    atomic_read(&evt_ctxt->chan[0]->poll_mode)) {
			cntr = 0;
			break;
		}
		gsi_process_evt_re(evt_ctxt, &notify, true);
		empty = false;
	}
	if (!empty)
		gsi_ring_evt_doorbell(evt_ctxt);
	if (cntr != 0)
		goto check_again;
	spin_unlock_irqrestore(&evt_ctxt->ring.slock, flags);
	return IRQ_HANDLED;
}
static uint32_t gsi_get_max_channels(enum gsi_ver ver)
{
	uint32_t max_ch = 0;
	struct gsihal_reg_hw_param hw_param;
	struct gsihal_reg_hw_param2 hw_param2;

	switch (ver) {
	case GSI_VER_ERR:
	case GSI_VER_MAX:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		break;
	case GSI_VER_1_0:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM,
			gsi_ctx->per.ee, &hw_param);
		max_ch = hw_param.gsi_ch_num;
		break;
	case GSI_VER_1_2:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_0,
			gsi_ctx->per.ee, &hw_param);
		max_ch = hw_param.gsi_ch_num;
		break;
	default:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_2,
			gsi_ctx->per.ee, &hw_param2);
		max_ch = hw_param2.gsi_num_ch_per_ee;
		break;
	}

	GSIDBG("max channels %d\n", max_ch);
	return max_ch;
}

static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
{
	uint32_t max_ev = 0;
	struct gsihal_reg_hw_param hw_param;
	struct gsihal_reg_hw_param2 hw_param2;
	struct gsihal_reg_hw_param4 hw_param4;

	switch (ver) {
	case GSI_VER_ERR:
	case GSI_VER_MAX:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		break;
	case GSI_VER_1_0:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM,
			gsi_ctx->per.ee, &hw_param);
		max_ev = hw_param.gsi_ev_ch_num;
		break;
	case GSI_VER_1_2:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_0,
			gsi_ctx->per.ee, &hw_param);
		max_ev = hw_param.gsi_ev_ch_num;
		break;
	case GSI_VER_3_0:
	case GSI_VER_5_2:
	case GSI_VER_5_5:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_4,
			gsi_ctx->per.ee, &hw_param4);
		max_ev = hw_param4.gsi_num_ev_per_ee;
		break;
	default:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_2,
			gsi_ctx->per.ee, &hw_param2);
		max_ev = hw_param2.gsi_num_ev_per_ee;
		break;
	}

	GSIDBG("max event rings %d\n", max_ev);
	return max_ev;
}

int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	gsi_handle_irq();
	gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_complete_clk_grant);

int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	gsi_ctx->base = devm_ioremap(gsi_ctx->dev, gsi_base_addr, gsi_size);
	if (!gsi_ctx->base) {
		GSIERR("failed to map access to GSI HW\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%x)\n",
		&gsi_base_addr,
		gsi_ctx->base,
		gsi_size);

	/* initialize HAL before accessing any register */
	gsihal_init(ver, gsi_ctx->base);

	return 0;
}
EXPORT_SYMBOL(gsi_map_base);

int gsi_unmap_base(void)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
	gsi_ctx->base = NULL;

	return 0;
}
EXPORT_SYMBOL(gsi_unmap_base);

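/*
 * __gsi_msi_write_msg - platform-MSI write_msg callback, invoked while the
 * MSI domain is being set up. Validates the descriptor, caches the MSI
 * message (data + address) for later pairing with an event ring, and
 * latches the shared MSI address the first time it is seen.
 */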
static void __gsi_msi_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	u16 msi = 0;

	if (IS_ERR_OR_NULL(desc) || IS_ERR_OR_NULL(msg) || IS_ERR_OR_NULL(gsi_ctx))
		BUG();

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
	msi = desc->msi_index;
#else
	msi = desc->platform.msi_index;
#endif

	/* The MSI index must be valid and not already allocated */
	if ((msi >= gsi_ctx->msi.num) || (test_bit(msi, gsi_ctx->msi.allocated)))
		BUG();

	/* Save the message for later use */
	memcpy(&gsi_ctx->msi.msg[msi], msg, sizeof(*msg));

	dev_notice(gsi_ctx->dev,
		"saved msi %u msg data %u addr 0x%08x%08x\n", msi,
		msg->data, msg->address_hi, msg->address_lo);

	/* A single MSI controller is used, so the MSI address is the same
	 * for all vectors; latch it once.
	 */
	if (!gsi_ctx->msi_addr_set) {
		gsi_ctx->msi_addr = gsi_ctx->msi.msg[msi].address_hi;
		gsi_ctx->msi_addr = (gsi_ctx->msi_addr << 32) |
			gsi_ctx->msi.msg[msi].address_lo;
		gsi_ctx->msi_addr_set = true;
	}

	GSIDBG("saved msi %u msg data %u addr 0x%08x%08x, MSI:0x%lx\n", msi,
		msg->data, msg->address_hi, msg->address_lo, gsi_ctx->msi_addr);
}

static int __gsi_request_msi_irq(unsigned long msi)
{
	int result = 0;

	/* Ensure this is not already allocated */
	if (test_bit((int)msi, gsi_ctx->msi.allocated)) {
		GSIERR("MSI %lu already allocated\n", msi);
		return -GSI_STATUS_ERROR;
	}

	/* Request MSI IRQ
	 * NOTE: During the call to devm_request_irq, the
	 * __gsi_msi_write_msg callback is triggered.
	 */
	result = devm_request_irq(gsi_ctx->dev, gsi_ctx->msi.irq[msi],
		(irq_handler_t)gsi_msi_isr, IRQF_TRIGGER_NONE,
		"gsi_msi", gsi_ctx);
	if (result) {
		GSIERR("failed to register msi irq %u idx %lu\n",
			gsi_ctx->msi.irq[msi], msi);
		return -GSI_STATUS_ERROR;
	}

	set_bit(msi, gsi_ctx->msi.allocated);
	return result;
}

static int __gsi_allocate_msis(void)
{
	int result = 0;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0))
	struct msi_desc *desc = NULL;
#endif
	size_t size = 0;

	/* Allocate all MSIs */
	GSIDBG("gsi_ctx->dev = %pK, gsi_ctx->msi.num = %d\n",
		gsi_ctx->dev, gsi_ctx->msi.num);
	result = platform_msi_domain_alloc_irqs(gsi_ctx->dev, gsi_ctx->msi.num,
		__gsi_msi_write_msg);
	if (result) {
		GSIERR("error allocating platform MSIs - %d\n", result);
		return -GSI_STATUS_ERROR;
	}
	GSIDBG("MSI allocation successful\n");

	/* Loop through the allocated MSIs and save the info, then
	 * request the IRQ.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
	for (unsigned long msi = 0; msi < gsi_ctx->msi.num; msi++) {
		/* Save IRQ */
		gsi_ctx->msi.irq[msi] = msi_get_virq(gsi_ctx->dev, msi);
#else
	for_each_msi_entry(desc, gsi_ctx->dev) {
		unsigned long msi = desc->platform.msi_index;

		/* Ensure a valid index */
		if (msi >= gsi_ctx->msi.num) {
			GSIERR("error invalid MSI %lu\n", msi);
			result = -GSI_STATUS_ERROR;
			goto err_free_msis;
		}

		/* Save IRQ */
		gsi_ctx->msi.irq[msi] = desc->irq;
		GSIDBG("desc->irq = %d\n", desc->irq);
#endif
		/* Request the IRQ */
		if (__gsi_request_msi_irq(msi)) {
			GSIERR("error requesting IRQ for MSI %lu\n", msi);
			result = -GSI_STATUS_ERROR;
			goto err_free_msis;
		}
		GSIDBG("IRQ request successful\n");
	}

	return result;

err_free_msis:
	size = sizeof(unsigned long) * BITS_TO_LONGS(gsi_ctx->msi.num);

	platform_msi_domain_free_irqs(gsi_ctx->dev);
	memset(gsi_ctx->msi.allocated, 0, size);
	return result;
}

int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
{
	int res;
	int result = GSI_STATUS_SUCCESS;
	struct gsihal_reg_gsi_status gsi_status;
	struct gsihal_reg_gsi_ee_n_cntxt_gsi_irq gen_irq;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !dev_hdl) {
		GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
		GSIERR("bad params gsi_ver=%d\n", props->ver);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->notify_cb) {
		GSIERR("notify callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->req_clk_cb && !props->rel_clk_cb) {
		GSIERR("rel callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->per_registered) {
		GSIERR("per already registered\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_init(&gsi_ctx->slock);
	gsi_ctx->per = *props;
	if (props->intr == GSI_INTR_IRQ) {
		if (!props->irq) {
			GSIERR("bad irq specified %u\n", props->irq);
			return -GSI_STATUS_INVALID_PARAMS;
		}
		/*
		 * On a real UE, there are two separate interrupt
		 * vectors that get directed toward the GSI/IPA
		 * drivers. They are handled by gsi_isr() and
		 * (ipa_isr() or ipa3_isr()) respectively. In the
		 * emulation environment, this is not the case;
		 * instead, interrupt vectors are routed to the
		 * emulation hardware's interrupt controller, which
		 * in turn forwards a single interrupt to the GSI/IPA
		 * driver. When the new interrupt vector is received,
		 * the driver needs to probe the interrupt
		 * controller's registers to see if one, the other, or
		 * both interrupts have occurred. Given the above, we
		 * now need to handle both situations, namely: the
		 * emulator's and the real UE.
		 */
		if (running_emulation) {
			/*
			 * New scheme involving the emulator's
			 * interrupt controller.
			 */
			res = devm_request_threaded_irq(
				gsi_ctx->dev,
				props->irq,
				/* top half handler to follow */
				emulator_hard_irq_isr,
				/* threaded bottom half handler to follow */
				emulator_soft_irq_isr,
				IRQF_SHARED,
				"emulator_intcntrlr",
				gsi_ctx);
		} else {
			/*
			 * Traditional scheme used on the real UE.
			 */
			res = devm_request_irq(gsi_ctx->dev, props->irq,
				gsi_isr,
				props->req_clk_cb ? IRQF_TRIGGER_RISING :
					IRQF_TRIGGER_HIGH,
				"gsi",
				gsi_ctx);
		}
		if (res) {
			GSIERR("failed to register isr for %u\n", props->irq);
			return -GSI_STATUS_ERROR;
		}
		GSIDBG("succeeded to register isr for %u\n", props->irq);

		res = enable_irq_wake(props->irq);
		if (res)
			GSIERR("failed to enable wake irq %u\n", props->irq);
		else
			GSIERR("GSI irq is wake enabled %u\n", props->irq);
	} else {
		GSIERR("do not support interrupt type %u\n", props->intr);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* If MSIs are enabled, make sure they are set up */
	if (gsi_ctx->msi.num) {
		if (__gsi_allocate_msis()) {
			GSIERR("failed to allocate MSIs\n");
			goto err_free_irq;
		}
	}

	/*
	 * If the base was not previously mapped via gsi_map_base(),
	 * map it now...
	 */
	if (!gsi_ctx->base) {
		res = gsi_map_base(props->phys_addr, props->size, props->ver);
		if (res) {
			result = res;
			goto err_free_msis;
		}
	}

	if (running_emulation) {
		GSIDBG("GSI SW ver register value 0x%x\n",
			gsihal_read_reg_n(GSI_EE_n_GSI_SW_VERSION, 0));
		gsi_ctx->intcntrlr_mem_size =
			props->emulator_intcntrlr_size;
		gsi_ctx->intcntrlr_base =
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
			devm_ioremap(
#else
			devm_ioremap_nocache(
#endif
				gsi_ctx->dev,
				props->emulator_intcntrlr_addr,
				props->emulator_intcntrlr_size);
		if (!gsi_ctx->intcntrlr_base) {
			GSIERR("failed to remap emulator's interrupt controller HW\n");
			gsi_unmap_base();
			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
			result = -GSI_STATUS_RES_ALLOC_FAILURE;
			goto err_iounmap;
		}

		GSIDBG("Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
			&(props->emulator_intcntrlr_addr),
			gsi_ctx->intcntrlr_base,
			props->emulator_intcntrlr_size);

		gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
		gsi_ctx->intcntrlr_client_isr =
			props->emulator_intcntrlr_client_isr;
	}

	gsi_ctx->per_registered = true;
	mutex_init(&gsi_ctx->mlock);
	atomic_set(&gsi_ctx->num_chan, 0);
	atomic_set(&gsi_ctx->num_evt_ring, 0);
	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
	if (gsi_ctx->max_ch == 0) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max channels\n");
		result = -GSI_STATUS_ERROR;
		goto err_iounmap;
	}

	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
	if (gsi_ctx->max_ev == 0) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max event rings\n");
		result = -GSI_STATUS_ERROR;
		goto err_iounmap;
	}

	if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
		GSIERR("max event rings are beyond absolute maximum\n");
		result = -GSI_STATUS_ERROR;
		goto err_iounmap;
	}

	if (props->mhi_er_id_limits_valid &&
	    props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("MHI event ring start id %u is beyond max %u\n",
			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
		result = -GSI_STATUS_ERROR;
		goto err_iounmap;
	}
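
	/*
	 * Build the event-ring allocation bitmap: a set bit marks a ring id
	 * as unavailable for dynamic allocation. First mark every id >=
	 * max_ev as used; then, if MHI limits are given, also reserve the
	 * inclusive range [mhi_er_id_limits[0], mhi_er_id_limits[1]]: the
	 * XOR of the two low-bit masks below yields exactly those bits.
	 * Example: max_ev = 16 and limits = {10, 13} leaves only ids 0-9
	 * and 14-15 allocatable.
	 */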
	gsi_ctx->evt_bmap = ~((((unsigned long)1) << gsi_ctx->max_ev) - 1);
	/* exclude reserved mhi events */
	if (props->mhi_er_id_limits_valid)
		gsi_ctx->evt_bmap |=
			((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^
			((1 << (props->mhi_er_id_limits[0])) - 1);

	/*
	 * Enable all interrupts but GSI_BREAK_POINT.
	 * Inter-EE commands / interrupts are not supported.
	 */
	__gsi_config_type_irq(props->ee, ~0, ~0);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		__gsi_config_all_ch_irq(props->ee, ~0, ~0);
		__gsi_config_all_evt_irq(props->ee, ~0, ~0);
		__gsi_config_all_ieob_irq(props->ee, ~0, ~0);
	} else {
		__gsi_config_ch_irq(props->ee, ~0, ~0);
		__gsi_config_evt_irq(props->ee, ~0, ~0);
		__gsi_config_ieob_irq(props->ee, ~0, ~0);
	}
	__gsi_config_glob_irq(props->ee, ~0, ~0);

	/*
	 * Disable the global INT1 interrupt by default; it is enabled
	 * only when sending the generic command.
	 */
	__gsi_config_glob_irq(props->ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);

	gen_irq.gsi_mcs_stack_ovrflow = 1;
	gen_irq.gsi_cmd_fifo_ovrflow = 1;
	gen_irq.gsi_bus_error = 1;
	gen_irq.gsi_break_point = 0;
	gsihal_write_reg_n_fields(GSI_EE_n_CNTXT_GSI_IRQ_EN,
		gsi_ctx->per.ee, &gen_irq);

	gsihal_write_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee, props->intr);

	/* set GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB/MSB to 0 */
	if ((gsi_ctx->per.ver >= GSI_VER_2_0) &&
	    (props->intr != GSI_INTR_MSI)) {
		gsihal_write_reg_n(
			GSI_EE_n_CNTXT_MSI_BASE_LSB, gsi_ctx->per.ee, 0);
		gsihal_write_reg_n(
			GSI_EE_n_CNTXT_MSI_BASE_MSB, gsi_ctx->per.ee, 0);
	}

	gsihal_read_reg_n_fields(GSI_EE_n_GSI_STATUS,
		gsi_ctx->per.ee, &gsi_status);
	if (gsi_status.enabled)
		gsi_ctx->enabled = true;
	else
		GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");

	if (gsi_ctx->per.ver >= GSI_VER_1_2)
		gsihal_write_reg_n(GSI_EE_n_ERROR_LOG, gsi_ctx->per.ee, 0);

	if (running_emulation) {
		/*
		 * Set up the emulator's interrupt controller...
		 */
		res = setup_emulator_cntrlr(
			gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
		if (res != 0) {
			GSIERR("setup_emulator_cntrlr() failed\n");
			result = res;
			goto err_iounmap;
		}
	}

	*dev_hdl = (uintptr_t)gsi_ctx;
	gsi_ctx->gsi_isr_cache_index = 0;

	return result;

err_iounmap:
	gsi_unmap_base();
	if (running_emulation && gsi_ctx->intcntrlr_base != NULL)
		devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
	gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;

err_free_msis:
	if (gsi_ctx->msi.num) {
		size_t size =
			sizeof(unsigned long) * BITS_TO_LONGS(gsi_ctx->msi.num);

		platform_msi_domain_free_irqs(gsi_ctx->dev);
		memset(gsi_ctx->msi.allocated, 0, size);
	}

err_free_irq:
	devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);

	return result;
}
EXPORT_SYMBOL(gsi_register_device);

int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	unsigned int max_usb_pkt_size = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (val->max_usb_pkt_size_valid &&
	    val->max_usb_pkt_size != 1024 &&
	    val->max_usb_pkt_size != 512 &&
	    val->max_usb_pkt_size != 64) {
		GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
			val->max_usb_pkt_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	if (val->mhi_base_chan_idx_valid)
		gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
			val->mhi_base_chan_idx;
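
	/*
	 * Encode the max USB packet size into the scratch field. From the
	 * mapping below: 64 -> 2, 512 -> 0, 1024 -> 1 (only these three
	 * sizes pass the validation above).
	 */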
	if (val->max_usb_pkt_size_valid) {
		max_usb_pkt_size = 2;
		if (val->max_usb_pkt_size > 64)
			max_usb_pkt_size =
				(val->max_usb_pkt_size == 1024) ? 1 : 0;
		gsi_ctx->scratch.word0.s.max_usb_pkt_size = max_usb_pkt_size;
	}

	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_device_scratch);

int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!force && atomic_read(&gsi_ctx->num_chan)) {
		GSIERR("cannot deregister: %u channels are still connected\n",
			atomic_read(&gsi_ctx->num_chan));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
		GSIERR("cannot deregister: %u event rings are still connected\n",
			atomic_read(&gsi_ctx->num_evt_ring));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* disable all interrupts */
	__gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		__gsi_config_all_ch_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_all_evt_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_all_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	} else {
		__gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	}
	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);

	if (gsi_ctx->msi.num)
		platform_msi_domain_free_irqs(gsi_ctx->dev);

	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
	gsihal_destroy();
	gsi_unmap_base();
	gsi_ctx->per_registered = false;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_deregister_device);

static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
		uint8_t evt_id, unsigned int ee)
{
	struct gsihal_reg_ev_ch_k_cntxt_0 ev_ch_k_cntxt_0;
	struct gsihal_reg_ev_ch_k_cntxt_1 ev_ch_k_cntxt_1;
	struct gsihal_reg_ev_ch_k_cntxt_2 ev_ch_k_cntxt_2;
	struct gsihal_reg_ev_ch_k_cntxt_3 ev_ch_k_cntxt_3;
	struct gsihal_reg_ev_ch_k_cntxt_8 ev_ch_k_cntxt_8;
	struct gsihal_reg_ev_ch_k_cntxt_9 ev_ch_k_cntxt_9;
	union gsihal_reg_ev_ch_k_cntxt_10 ev_ch_k_cntxt_10;
	union gsihal_reg_ev_ch_k_cntxt_11 ev_ch_k_cntxt_11;
	struct gsihal_reg_ev_ch_k_cntxt_12 ev_ch_k_cntxt_12;
	struct gsihal_reg_ev_ch_k_cntxt_13 ev_ch_k_cntxt_13;

	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
		props->re_size);

	ev_ch_k_cntxt_0.chtype = props->intf;
	ev_ch_k_cntxt_0.intype = props->intr;
	ev_ch_k_cntxt_0.element_size = props->re_size;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
		ee, evt_id, &ev_ch_k_cntxt_0);

	ev_ch_k_cntxt_1.r_length = props->ring_len;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_1,
		ee, evt_id, &ev_ch_k_cntxt_1);

	ev_ch_k_cntxt_2.r_base_addr_lsbs = GSI_LSB(props->ring_base_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_2,
		ee, evt_id, &ev_ch_k_cntxt_2);

	ev_ch_k_cntxt_3.r_base_addr_msbs = GSI_MSB(props->ring_base_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_3,
		ee, evt_id, &ev_ch_k_cntxt_3);

	ev_ch_k_cntxt_8.int_modt = props->int_modt;
	ev_ch_k_cntxt_8.int_modc = props->int_modc;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_8,
		ee, evt_id, &ev_ch_k_cntxt_8);

	ev_ch_k_cntxt_9.intvec = props->intvec;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_9,
		ee, evt_id, &ev_ch_k_cntxt_9);

	if (props->intf != GSI_EVT_CHTYPE_WDI3_V2_EV) {
		ev_ch_k_cntxt_10.msi_addr_lsb = GSI_LSB(props->msi_addr);
		gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_10,
			ee, evt_id, &ev_ch_k_cntxt_10);

		ev_ch_k_cntxt_11.msi_addr_msb = GSI_MSB(props->msi_addr);
		gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_11,
			ee, evt_id, &ev_ch_k_cntxt_11);

		ev_ch_k_cntxt_12.rp_update_addr_lsb = GSI_LSB(props->rp_update_addr);
		gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_12,
			ee, evt_id, &ev_ch_k_cntxt_12);

		ev_ch_k_cntxt_13.rp_update_addr_msb = GSI_MSB(props->rp_update_addr);
		gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_13,
			ee, evt_id, &ev_ch_k_cntxt_13);
	} else {
		ev_ch_k_cntxt_10.rp_addr_lsb = GSI_LSB(props->rp_update_addr);
		gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_10,
			ee, evt_id, &ev_ch_k_cntxt_10);

		ev_ch_k_cntxt_11.rp_addr_msb = GSI_MSB(props->rp_update_addr);
		gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_11,
			ee, evt_id, &ev_ch_k_cntxt_11);
	}
}

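/*
 * gsi_init_evt_ring - initialize the software ring context from the event
 * ring properties. max_num_elem is the element count minus one: keeping one
 * slot unused lets RP == WP unambiguously mean "ring empty" (the usual
 * ring-buffer convention; assumed here to match the HW's full/empty rule).
 */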
static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;

	if (props->rp_update_vaddr)
		*(uint64_t *)(props->rp_update_vaddr) = ctx->rp_local;
}

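/*
 * gsi_prime_evt_ring - hand the (zeroed) event ring to HW by advancing the
 * local write pointer past all usable elements and ringing the doorbell,
 * MSB register first as the HW requires.
 */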
static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;
	struct gsihal_reg_gsi_ee_n_ev_ch_k_doorbell_1 db;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		ctx->ring.max_num_elem * ctx->ring.elem_sz;

	/* write order MUST be MSB followed by LSB */
	db.write_ptr_msb = GSI_MSB(ctx->ring.wp_local);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->id, &db);
	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static void gsi_prime_evt_ring_wdi(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	if (ctx->ring.base_va)
		memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		((ctx->ring.max_num_elem + 2) * ctx->ring.elem_sz);
	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
{
	uint64_t ra;

	if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
	     props->ring_len % 4) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_8B &&
	     props->ring_len % 8) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
	     props->ring_len % 16) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_32B &&
	     props->ring_len % 32)) {
		GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
			props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!gsihal_check_ring_length_valid(props->ring_len, props->re_size))
		return -GSI_STATUS_INVALID_PARAMS;

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
			props->ring_base_addr,
			roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
	    (!props->evchid_valid ||
	     props->evchid > gsi_ctx->per.mhi_er_id_limits[1] ||
	     props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) {
		GSIERR("MHI requires evchid valid=%d val=%u\n",
			props->evchid_valid, props->evchid);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
	    props->evchid_valid) {
		GSIERR("protocol %u cannot specify evchid\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}

/**
 * gsi_cleanup_xfer_user_data: clean up the user data array using the
 * callback passed by the IPA driver. This must be done in GSI since only
 * GSI knows which TREs are in use. However, IPA owns the actual cleanup,
 * so IPA passes a callback and GSI invokes it with GSI-provided params.
 *
 * @chan_hdl: hdl of the gsi channel whose user data array is to be cleaned
 * @cleanup_cb: callback used to clean the user data array. takes 2 inputs:
 * @chan_user_data: ipa_sys_context of the gsi_channel
 * @xfer_user_data: user data array element (rx_pkt wrapper)
 *
 * Returns: 0 on success, negative on failure
 */
static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
	void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
{
	struct gsi_chan_ctx *ctx;
	uint64_t i;
	uint16_t rp_idx;

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* for coalescing, traverse the whole array */
	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		size_t user_data_size =
			ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
		for (i = 0; i < user_data_size; i++) {
			if (ctx->user_data[i].valid)
				cleanup_cb(ctx->props.chan_user_data,
					ctx->user_data[i].p);
		}
	} else {
		/* for non-coalescing, clean between RP and WP */
		while (ctx->ring.rp_local != ctx->ring.wp_local) {
			rp_idx = gsi_find_idx_from_addr(&ctx->ring,
				ctx->ring.rp_local);
			WARN_ON(!ctx->user_data[rp_idx].valid);
			cleanup_cb(ctx->props.chan_user_data,
				ctx->user_data[rp_idx].p);
			gsi_incr_ring_rp(&ctx->ring);
		}
	}
	return 0;
}

/**
 * gsi_read_event_ring_rp_ddr - returns the RP value of the event ring,
 * read from the RP-update shadow location in DDR.
 *
 * @props: Props structure of the event channel
 * @id: Event channel index
 * @ee: EE
 *
 * @Return the read pointer value
 */
static inline uint64_t gsi_read_event_ring_rp_ddr(struct gsi_evt_ring_props *props,
	uint8_t id, int ee)
{
	return readl_relaxed(props->rp_update_vaddr);
}

/**
 * gsi_read_event_ring_rp_reg - returns the RP value of the event ring,
 * read from the ring context registers.
 *
 * @props: Props structure of the event channel
 * @id: Event channel index
 * @ee: EE
 *
 * @Return the read pointer value
 */
static inline uint64_t gsi_read_event_ring_rp_reg(struct gsi_evt_ring_props *props,
	uint8_t id, int ee)
{
	uint64_t rp;

	rp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4, ee, id);
	rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_5, ee, id)) << 32;

	return rp;
}

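/*
 * __gsi_pair_msi - bind the first free, previously allocated MSI to this
 * event ring: record the ring id for ISR lookup, expose the MSI message
 * data/address through props (consumed when the event ring context is
 * programmed), and mark the MSI as used.
 */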
static int __gsi_pair_msi(struct gsi_evt_ctx *ctx,
		struct gsi_evt_ring_props *props)
{
	int result = GSI_STATUS_SUCCESS;
	unsigned long msi = 0;

	if (IS_ERR_OR_NULL(ctx) || IS_ERR_OR_NULL(props) || IS_ERR_OR_NULL(gsi_ctx))
		BUG();

	/* Find the first unused MSI */
	msi = find_first_zero_bit(gsi_ctx->msi.used, gsi_ctx->msi.num);
	if (msi >= gsi_ctx->msi.num) {
		GSIERR("No free MSIs for evt %u\n", ctx->id);
		return -GSI_STATUS_ERROR;
	}

	/* Ensure it has been allocated */
	if (!test_bit((int)msi, gsi_ctx->msi.allocated)) {
		GSIDBG("MSI %lu not allocated\n", msi);
		return -GSI_STATUS_ERROR;
	}

	/* Save the event ID for later lookup */
	gsi_ctx->msi.evt[msi] = ctx->id;

	/* Add this event to the IRQ mask */
	set_bit((int)ctx->id, &gsi_ctx->msi.mask);

	props->intvec = gsi_ctx->msi.msg[msi].data;
	props->msi_addr = (uint64_t)gsi_ctx->msi.msg[msi].address_hi << 32 |
		(uint64_t)gsi_ctx->msi.msg[msi].address_lo;
	GSIDBG("props->intvec = %u, props->msi_addr = 0x%llx\n",
		props->intvec, props->msi_addr);
	if (props->msi_addr == 0)
		BUG();

	/* Mark the MSI as used */
	set_bit(msi, gsi_ctx->msi.used);

	return result;
}

int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	unsigned long evt_id;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	struct gsi_evt_ctx *ctx;
	int res = 0;
	int ee;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n",
			props, dev_hdl, evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_evt_ring_props(props)) {
		GSIERR("invalid params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
			sizeof(unsigned long) * BITS_PER_BYTE);
		if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
			GSIERR("failed to alloc event ID\n");
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		set_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		evt_id = props->evchid;
	}
	GSIDBG("Using %lu as virt evt id\n", evt_id);

	if (props->rp_update_addr != 0) {
		GSIDBG("Using DDR to read event RP for virt evt id: %lu\n",
			evt_id);
		props->gsi_read_event_ring_rp = gsi_read_event_ring_rp_ddr;
	} else {
		GSIDBG("Using CONTEXT reg to read event RP for virt evt id: %lu\n",
			evt_id);
		props->gsi_read_event_ring_rp = gsi_read_event_ring_rp_reg;
	}

	ctx = &gsi_ctx->evtr[evt_id];
	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->chan_ref_cnt, 0);
	ctx->num_of_chan_allocated = 0;
	ctx->id = evt_id;

	mutex_lock(&gsi_ctx->mlock);
	/* Pair an MSI with this event ring if this is an MSI, GPI event channel.
	 * NOTE: This modifies props, so it must run before props are saved to ctx.
	 */
	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
	    props->intr == GSI_INTR_MSI) {
		if (__gsi_pair_msi(ctx, props)) {
			GSIERR("evt_id=%lu failed to pair MSI\n", evt_id);
			if (!props->evchid_valid)
				clear_bit(evt_id, &gsi_ctx->evt_bmap);
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_NODEV;
		}
		GSIDBG("evt_id=%lu MSI pairing successful\n", evt_id);
	}

	ctx->props = *props;

	ee = gsi_ctx->per.ee;
	ev_ch_cmd.opcode = op;
	ev_ch_cmd.chid = evt_id;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD, ee, &ev_ch_cmd);

	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_id);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu allocation failed state=%u\n",
			evt_id, ctx->state);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_evt_ring(props, &ctx->ring);

	ctx->id = evt_id;
	*evt_ring_hdl = evt_id;
	atomic_inc(&gsi_ctx->num_evt_ring);
	if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	else if (props->intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee,
			gsihal_get_ch_reg_idx(evt_id),
			gsihal_get_ch_reg_mask(evt_id));
	} else {
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR, ee,
			1 << evt_id);
	}

	/* enable ieob interrupts for GPI, enable MSI interrupts */
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
		    (props->intr != GSI_INTR_MSI))
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(evt_id),
				gsihal_get_ch_reg_mask(evt_id),
				0);
		else
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(evt_id),
				gsihal_get_ch_reg_mask(evt_id),
				~0);
	} else {
		if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
		    (props->intr != GSI_INTR_MSI))
			__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
		else
			__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
	}
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_evt_ring);

static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, evt_ring_hdl, val.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, evt_ring_hdl, val.data.word2);
}

int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_evt_ring_scratch(evt_ring_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_evt_ring_scratch);

int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
	struct gsi_evt_ctx *ctx;
	int res = 0;
	u32 msi;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev ||
	    evt_ring_hdl >= GSI_EVT_RING_MAX) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (atomic_read(&ctx->chan_ref_cnt)) {
		GSIERR("%d channels still using this event ring\n",
			atomic_read(&ctx->chan_ref_cnt));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* Unpair the MSI */
	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
	    ctx->props.intr == GSI_INTR_MSI) {
		GSIERR("Interrupt dereg for msi_irq = %d\n", ctx->props.msi_irq);
		for (msi = 0; msi < gsi_ctx->msi.num; msi++) {
			if (gsi_ctx->msi.msg[msi].data == ctx->props.intvec) {
				mutex_lock(&gsi_ctx->mlock);
				clear_bit(msi, gsi_ctx->msi.used);
				gsi_ctx->msi.evt[msi] = 0;
				clear_bit(evt_ring_hdl, &gsi_ctx->msi.mask);
				mutex_unlock(&gsi_ctx->mlock);
			}
		}
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	ev_ch_cmd.chid = evt_ring_hdl;
	ev_ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD,
		gsi_ctx->per.ee, &ev_ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
			ctx->state);
		/*
		 * IPA Hardware returned GSI RING not allocated, which is
		 * an unexpected hardware state.
		 */
		GSI_ASSERT();
	}
	mutex_unlock(&gsi_ctx->mlock);

	if (!ctx->props.evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	}
	atomic_dec(&gsi_ctx->num_evt_ring);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_evt_ring);

int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
			db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr + gsihal_get_reg_nk_ofst(
		GSI_EE_n_EV_CH_k_DOORBELL_0, gsi_ctx->per.ee, evt_ring_hdl);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr + gsihal_get_reg_nk_ofst(
		GSI_EE_n_EV_CH_k_DOORBELL_1, gsi_ctx->per.ee, evt_ring_hdl);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);

int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;
	gsi_ring_evt_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_evt_ring_db);

int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_STARTED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;

	/* write MSB first */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->props.ch_id, GSI_MSB(ctx->ring.wp_local));

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_ch_ring_db);

int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	ev_ch_cmd.chid = evt_ring_hdl;
	ev_ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD,
		gsi_ctx->per.ee, &ev_ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
			ctx->state);
		/*
		 * IPA Hardware returned GSI RING not allocated, which is
		 * unexpected. Indicates hardware instability.
		 */
		GSI_ASSERT();
	}

	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
	gsi_init_evt_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);

	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	if (ctx->props.intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_evt_ring);

int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_evt_ring_cfg);

int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_evt_ring_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.exclusive != props->exclusive) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	mutex_unlock(&ctx->mlock);

	return gsi_reset_evt_ring(evt_ring_hdl);
}
EXPORT_SYMBOL(gsi_set_evt_ring_cfg);

static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props,
		unsigned int ee)
{
	struct gsihal_reg_gsi_ee_n_gsi_ch_k_qos ch_k_qos;

	ch_k_qos.wrr_weight = props->low_weight;
	ch_k_qos.max_prefetch = props->max_prefetch;
	ch_k_qos.use_db_eng = props->use_db_eng;

	if (gsi_ctx->per.ver >= GSI_VER_2_0) {
		if (gsi_ctx->per.ver < GSI_VER_2_5) {
			ch_k_qos.use_escape_buf_only = props->prefetch_mode;
		} else {
			ch_k_qos.prefetch_mode = props->prefetch_mode;
			ch_k_qos.empty_lvl_thrshold =
				props->empty_lvl_threshold;
			if (gsi_ctx->per.ver >= GSI_VER_2_9)
				ch_k_qos.db_in_bytes = props->db_in_bytes;
			if (gsi_ctx->per.ver >= GSI_VER_3_0)
				ch_k_qos.low_latency_en = props->low_latency_en;
		}
	}

	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_QOS,
		ee, props->ch_id, &ch_k_qos);
}

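/*
 * gsi_program_chan_ctx - program CNTXT_0/1/2/3 for a channel. The protocol
 * enum is wider than the chtype_protocol field, so chtype_protocol_msb
 * carries the high bit: 0 for the first protocol group (MHI..WDI3_V2),
 * 1 for the second (AQC..NTN). From GSI 3.0 onward the event ring index
 * lives in CNTXT_1 instead of CNTXT_0.
 */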
static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
		uint8_t erindex)
{
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	struct gsihal_reg_ch_k_cntxt_1 ch_k_cntxt_1;

	switch (props->prot) {
	case GSI_CHAN_PROT_MHI:
	case GSI_CHAN_PROT_XHCI:
	case GSI_CHAN_PROT_GPI:
	case GSI_CHAN_PROT_XDCI:
	case GSI_CHAN_PROT_WDI2:
	case GSI_CHAN_PROT_WDI3:
	case GSI_CHAN_PROT_GCI:
	case GSI_CHAN_PROT_MHIP:
	case GSI_CHAN_PROT_WDI3_V2:
		ch_k_cntxt_0.chtype_protocol_msb = 0;
		break;
	case GSI_CHAN_PROT_AQC:
	case GSI_CHAN_PROT_11AD:
	case GSI_CHAN_PROT_RTK:
	case GSI_CHAN_PROT_QDSS:
	case GSI_CHAN_PROT_NTN:
		ch_k_cntxt_0.chtype_protocol_msb = 1;
		break;
	default:
		GSIERR("Unsupported protocol %d\n", props->prot);
		WARN_ON(1);
		return;
	}

	ch_k_cntxt_0.chtype_protocol = props->prot;
	ch_k_cntxt_0.chtype_dir = props->dir;
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		ch_k_cntxt_1.erindex = erindex;
	} else {
		ch_k_cntxt_0.erindex = erindex;
	}
	ch_k_cntxt_0.element_size = props->re_size;
	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
		ee, props->ch_id, &ch_k_cntxt_0);

	ch_k_cntxt_1.r_length = props->ring_len;
	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_1,
		ee, props->ch_id, &ch_k_cntxt_1);

	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		ee, props->ch_id, GSI_LSB(props->ring_base_addr));
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		ee, props->ch_id, GSI_MSB(props->ring_base_addr));

	gsi_program_chan_ctx_qos(props, ee);
}

static void gsi_init_chan_ring(struct gsi_chan_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) *
		ctx->elem_sz;
}

static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
	uint64_t ra;
	uint64_t last;

	if (props->ch_id >= gsi_ctx->max_ch) {
		GSIERR("ch_id %u invalid\n", props->ch_id);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
		props->ring_len % 4) ||
		(props->re_size == GSI_CHAN_RE_SIZE_8B &&
		props->ring_len % 8) ||
		(props->re_size == GSI_CHAN_RE_SIZE_16B &&
		props->ring_len % 16) ||
		(props->re_size == GSI_CHAN_RE_SIZE_32B &&
		props->ring_len % 32) ||
		(props->re_size == GSI_CHAN_RE_SIZE_64B &&
		props->ring_len % 64)) {
		GSIERR("bad params ring_len %u not a multiple of re size %u\n",
			props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!gsihal_check_ring_length_valid(props->ring_len, props->re_size))
		return -GSI_STATUS_INVALID_PARAMS;

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));

	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
			props->ring_base_addr,
			roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	last = props->ring_base_addr + props->ring_len - props->re_size;

	/* MSB should stay same within the ring */
	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
	    (last & 0xFFFFFFFF00000000ULL)) {
		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
			props->ring_base_addr,
			props->ring_len);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->prot == GSI_CHAN_PROT_GPI &&
		!props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->prot);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
		GSIERR("invalid channel low weight %u\n", props->low_weight);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
		GSIERR("xfer callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}
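
/*
 * gsi_alloc_channel() - allocate a GSI channel for the caller: validate the
 * properties and the requested event ring, issue the GSI_CH_ALLOCATE
 * command (except on GSI 2.2, where the state is only set locally), bind
 * the channel to its event ring, program the hardware channel context and
 * initialize the local ring state. On success *chan_hdl holds the channel
 * id.
 */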
int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
	uint8_t erindex;
	struct gsi_user_data *user_data;
	size_t user_data_size;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n",
			props, dev_hdl, chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_channel_props(props)) {
		GSIERR("bad params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->evt_ring_hdl != ~0) {
		if (props->evt_ring_hdl >= gsi_ctx->max_ev) {
			GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl);
			return -GSI_STATUS_INVALID_PARAMS;
		}

		if (atomic_read(
			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
			gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
			gsi_ctx->evtr[props->evt_ring_hdl].chan[0]->props.prot !=
			GSI_CHAN_PROT_GCI) {
			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
				props->evt_ring_hdl, chan_hdl);
			return -GSI_STATUS_UNSUPPORTED_OP;
		}
	}

	ctx = &gsi_ctx->chan[props->ch_id];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", props->ch_id);
		return -GSI_STATUS_NODEV;
	}
	memset(ctx, 0, sizeof(*ctx));

	/* IPA-offloaded WDI channels do not require the per-RE user_data array */
	if (props->prot != GSI_CHAN_PROT_WDI2 &&
		props->prot != GSI_CHAN_PROT_WDI3 &&
		props->prot != GSI_CHAN_PROT_WDI3_V2)
		user_data_size = props->ring_len / props->re_size;
	else
		user_data_size = props->re_size;

	/*
	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
	 * user_data needs to be large enough to accommodate those.
	 * TODO: increase user data size if GSI_VEID_MAX is not enough
	 */
	if (props->prot == GSI_CHAN_PROT_GCI)
		user_data_size += GSI_VEID_MAX;

	user_data = devm_kzalloc(gsi_ctx->dev,
		user_data_size * sizeof(*user_data),
		GFP_KERNEL);
	if (user_data == NULL) {
		GSIERR("context not allocated\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
	ctx->props = *props;

	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;

		mutex_lock(&gsi_ctx->mlock);
		ee = gsi_ctx->per.ee;
		gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
		ch_cmd.chid = props->ch_id;
		ch_cmd.opcode = op;
		gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD, ee, &ch_cmd);
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%u timed out\n", props->ch_id);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
			GSIERR("chan_hdl=%u allocation failed state=%d\n",
				props->ch_id, ctx->state);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		ctx->state = GSI_CHAN_STATE_ALLOCATED;
		mutex_unlock(&gsi_ctx->mlock);
	}

	erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
		GSI_NO_EVT_ERINDEX;
	if (erindex != GSI_NO_EVT_ERINDEX && erindex >= GSI_EVT_RING_MAX) {
		GSIERR("invalid erindex %u\n", erindex);
		devm_kfree(gsi_ctx->dev, user_data);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (erindex < GSI_EVT_RING_MAX) {
		ctx->evtr = &gsi_ctx->evtr[erindex];
		if (ctx->evtr->num_of_chan_allocated
			>= MAX_CHANNELS_SHARING_EVENT_RING) {
			GSIERR(
				"too many channels sharing the same event ring %u\n",
				erindex);
			GSI_ASSERT();
		}
		if (props->prot != GSI_CHAN_PROT_GCI) {
			atomic_inc(&ctx->evtr->chan_ref_cnt);
			if (ctx->evtr->props.exclusive) {
				if (atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
					ctx->evtr->chan
					[ctx->evtr->num_of_chan_allocated++] = ctx;
			} else {
				ctx->evtr->chan[ctx->evtr->num_of_chan_allocated++]
					= ctx;
			}
		}
	}

	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_chan_ring(props, &ctx->ring);
	if (!props->max_re_expected)
		ctx->props.max_re_expected = ctx->ring.max_num_elem;
	ctx->user_data = user_data;
	*chan_hdl = props->ch_id;
	ctx->allocated = true;
	ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
	atomic_inc(&gsi_ctx->num_chan);

	if (props->prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = props->ch_id;
		gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_channel);
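
/*
 * gsi_alloc_ap_channel() - allocate a channel owned by the AP itself:
 * issue GSI_CH_ALLOCATE for chan_hdl and wait until the channel context
 * reaches the ALLOCATED state.
 */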
static int gsi_alloc_ap_channel(unsigned int chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->allocated) {
		GSIERR("chan %u already allocated\n", chan_hdl);
		return -GSI_STATUS_NODEV;
	}

	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);

	mutex_lock(&gsi_ctx->mlock);
	ee = gsi_ctx->per.ee;
	gsi_ctx->ch_dbg[chan_hdl].ch_allocate++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD, ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%u timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%u allocation failed state=%d\n",
			chan_hdl, ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
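
/*
 * Raw scratch accessors: the four 32-bit words of the packed
 * gsi_channel_scratch union map 1:1 onto the channel SCRATCH_0..SCRATCH_3
 * registers. Callers are expected to serialize access and keep the cached
 * ctx->scratch copy in sync with what is written here.
 */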
static void __gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl, val.data.word2);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word3);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, val.data.word4);
}

static void __gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
}

int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
		union __packed gsi_wdi_channel_scratch3_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.wdi.endp_metadatareg_offset =
		val.wdi.endp_metadatareg_offset;
	ctx->scratch.wdi.qmap_id = val.wdi.qmap_id;
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);

int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi2_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.wdi2_new.endp_metadatareg_offset =
		val.wdi.endp_metadatareg_offset;
	ctx->scratch.wdi2_new.qmap_id = val.wdi.qmap_id;
	val.wdi.update_ri_moderation_threshold =
		ctx->scratch.wdi2_new.update_ri_moderation_threshold;
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch2_reg);

static void __gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *val)
{
	val->data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word2 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word3 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word4 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
}

static void __gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg *val)
{
	val->data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
}
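
/*
 * Public scratch accessors: validate the handle and the channel state,
 * then update the cached ctx->scratch copy and the hardware registers
 * under the per-channel mutex.
 */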
int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch);

int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.data.word3 = val.data.word1;
	__gsi_write_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_wdi3_channel_scratch2_reg);

int gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	__gsi_read_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_read_channel_scratch);

int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg *val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	__gsi_read_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_read_wdi3_channel_scratch2_reg);

int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
		struct __packed gsi_mhi_channel_scratch mscr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_update_mhi_channel_scratch);
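
/*
 * gsi_query_channel_db_addr() - return the physical addresses of the
 * channel doorbell registers (DOORBELL_0 for the write pointer LSBs,
 * DOORBELL_1 for the MSBs) so a peripheral can ring the channel directly.
 */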
int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
			db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_DOORBELL_0,
			gsi_ctx->per.ee, chan_hdl);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_DOORBELL_1,
			gsi_ctx->per.ee, chan_hdl);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_db_addr);

int gsi_pending_irq_type(void)
{
	int ee = gsi_ctx->per.ee;

	return gsihal_read_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ, ee);
}
EXPORT_SYMBOL(gsi_pending_irq_type);
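
/*
 * gsi_start_channel() - issue GSI_CH_START and wait for the channel to
 * reach the STARTED (or FLOW_CONTROL) state. For GPI channels INTSET must
 * be in IRQ mode; any other value is treated as a fatal setup error.
 */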
int gsi_start_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_START;
	uint32_t val;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state == GSI_CHAN_STATE_STARTED) {
		GSIDBG("chan_hdl=%lu already in started state\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
		ctx->state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsihal_read_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee);
	if (ctx->evtr &&
		ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
		val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);
	GSIDBG("GSI Channel Start, waiting for completion\n");
	gsi_channel_state_change_wait(chan_hdl,
		ctx,
		GSI_START_CMD_TIMEOUT_MS, op);

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_FLOW_CONTROL) {
		/* Hardware returned an unexpected channel state */
		GSIERR("chan=%lu timed out, unexpected state=%u\n",
			chan_hdl, ctx->state);
		gsi_dump_ch_info(chan_hdl);
		GSI_ASSERT();
	}

	GSIDBG("GSI Channel=%lu Start success\n", chan_hdl);

	/* write order MUST be MSB followed by LSB */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->props.ch_id, GSI_MSB(ctx->ring.wp_local));

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_channel);
void gsi_dump_ch_info(unsigned long chan_hdl)
{
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIDBG("invalid chan id %lu\n", chan_hdl);
		return;
	}

	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX0 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX1 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX2 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX3 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX4 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX5 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX6 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX7 0x%x\n", chan_hdl, val);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_8,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu CTX8 0x%x\n", chan_hdl, val);
	}
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu REFRP 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu REFWP 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_QOS,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu QOS 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR0 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR1 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR2 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR3 0x%x\n", chan_hdl, val);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_4,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR4 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_5,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR5 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_6,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR6 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_7,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR7 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_8,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR8 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_9,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR9 0x%x\n", chan_hdl, val);
	}
}
EXPORT_SYMBOL(gsi_dump_ch_info);
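
/*
 * gsi_stop_channel() - issue GSI_CH_STOP and wait for the channel to reach
 * the STOPPED state. Returns -GSI_STATUS_AGAIN while the stop is still in
 * progress so the caller can retry. For MSI event channels the pending
 * IEOB interrupt is cleared once the stop completes.
 */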
int gsi_stop_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
	int res;
	uint32_t val;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
		ctx->state != GSI_CHAN_STATE_ERROR) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsihal_read_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee);
	if (ctx->evtr &&
		ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
		val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);
	GSIDBG("GSI Channel Stop, waiting for completion: 0x%x\n", val);
	gsi_channel_state_change_wait(chan_hdl,
		ctx,
		GSI_STOP_CMD_TIMEOUT_MS, op);

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		gsi_dump_ch_info(chan_hdl);
		res = -GSI_STATUS_BAD_STATE;
		BUG();
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	/*
	 * If the channel was stopped successfully and has an event ring
	 * with IRQ type MSI - clear IEOB
	 */
	if (ctx->evtr && ctx->evtr->props.intr == GSI_INTR_MSI) {
		spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
				gsi_ctx->per.ee, gsihal_get_ch_reg_idx(ctx->evtr->id),
				gsihal_get_ch_reg_mask(ctx->evtr->id));
		} else {
			gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
				gsi_ctx->per.ee, 1 << ctx->evtr->id);
		}
		spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_channel);
int gsi_stop_db_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl,
		msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_db_channel);
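
/*
 * gsi_reset_channel() - reset a stopped (or, in the WDI3 SAP case, still
 * allocated) channel: issue GSI_CH_RESET, re-verify the resulting state
 * with a short retry loop, apply the pre-2.0 producer double-reset
 * workaround, then reprogram the channel context, ring state and scratch
 * registers.
 */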
int gsi_reset_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;
	bool reset_done = false;
	uint32_t retry_cnt = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/*
	 * In the WDI3 case, if SAP is enabled but no client is connected,
	 * GSI will be in the allocated state. When SAP is disabled,
	 * gsi_reset_channel will be called and a reset is needed.
	 */
	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);

reset:
	reinit_completion(&ctx->compl);
	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

revrfy_chnlstate:
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
			ctx->state);
		/*
		 * The GSI register state may not yet be in sync with the
		 * channel context state; wait ~1 ms and re-check.
		 */
		retry_cnt++;
		if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
			usleep_range(GSI_RESET_WA_MIN_SLEEP,
				GSI_RESET_WA_MAX_SLEEP);
			goto revrfy_chnlstate;
		}
		/* Hardware returned an unexpected channel state */
		GSI_ASSERT();
	}

	/* The hardware issue is fixed from GSI 2.0 onward; no WA needed */
	if (gsi_ctx->per.ver >= GSI_VER_2_0)
		reset_done = true;

	/* workaround: reset GSI producers again */
	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
		reset_done = true;
		goto reset;
	}

	if (ctx->props.cleanup_cb)
		gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);

	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_channel);
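
/*
 * gsi_dealloc_channel() - release a channel in ALLOCATED state. On GSI
 * 2.2, which has no de-alloc command, only the local state is updated.
 * Event ring references and the coalescing channel info are dropped as
 * needed.
 */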
int gsi_dealloc_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* Channel deallocation is not supported in GSI_VER_2_2 */
	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		mutex_lock(&gsi_ctx->mlock);
		reinit_completion(&ctx->compl);

		gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
		ch_cmd.chid = chan_hdl;
		ch_cmd.opcode = op;
		gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
			gsi_ctx->per.ee, &ch_cmd);
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
			GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
				ctx->state);
			/* Hardware returned incorrect value */
			GSI_ASSERT();
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		GSIDBG("channel deallocation not supported in GSI_VER_2_2\n");
		ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED;
		GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl,
			ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
	}
	devm_kfree(gsi_ctx->dev, ctx->user_data);
	ctx->allocated = false;
	if (ctx->evtr && (ctx->props.prot != GSI_CHAN_PROT_GCI)) {
		atomic_dec(&ctx->evtr->chan_ref_cnt);
		ctx->evtr->num_of_chan_allocated--;
	}
	atomic_dec(&gsi_ctx->num_chan);

	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
		gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_channel);
void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
{
	unsigned long now = jiffies_to_msecs(jiffies);
	unsigned long elapsed;

	if (used == 0) {
		elapsed = now - ctx->stats.dp.last_timestamp;
		if (ctx->stats.dp.empty_time < elapsed)
			ctx->stats.dp.empty_time = elapsed;
	}

	if (used <= ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_lo;
	else if (used <= 2 * ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_hi;
	else
		++ctx->stats.dp.ch_above_hi;
	ctx->stats.dp.last_timestamp = now;
}
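
/*
 * __gsi_query_channel_free_re() - compute how many ring elements are free
 * on a GPI channel from the distance between the (refreshed) read pointer
 * and the local write pointer, accounting for wrap-around.
 */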
static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
		uint16_t *num_free_re)
{
	uint16_t start;
	uint16_t end;
	uint64_t rp;
	int ee = gsi_ctx->per.ee;
	uint16_t used;

	WARN_ON(ctx->props.prot != GSI_CHAN_PROT_GPI);

	if (!ctx->evtr) {
		rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
			ee, ctx->props.ch_id);
		rp |= ctx->ring.rp & GSI_MSB_MASK;
		ctx->ring.rp = rp;
	} else {
		rp = ctx->ring.rp_local;
	}

	start = gsi_find_idx_from_addr(&ctx->ring, rp);
	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);

	if (end >= start)
		used = end - start;
	else
		used = ctx->ring.max_num_elem + 1 - (start - end);

	*num_free_re = ctx->ring.max_num_elem - used;
}
int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !info) {
		GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->evtr) {
		slock = &ctx->evtr->ring.slock;
		info->evt_valid = true;
	} else {
		slock = &ctx->ring.slock;
		info->evt_valid = false;
	}

	spin_lock_irqsave(slock, flags);

	ee = gsi_ctx->per.ee;
	rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		ee, ctx->props.ch_id);
	rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		ee, ctx->props.ch_id)) << 32;
	ctx->ring.rp = rp;
	info->rp = rp;

	wp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		ee, ctx->props.ch_id);
	wp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		ee, ctx->props.ch_id)) << 32;
	ctx->ring.wp = wp;
	info->wp = wp;

	if (info->evt_valid) {
		rp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4,
			ee, ctx->evtr->id);
		rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_5,
			ee, ctx->evtr->id)) << 32;
		info->evt_rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_6,
			ee, ctx->evtr->id);
		wp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_7,
			ee, ctx->evtr->id)) << 32;
		info->evt_wp = wp;
	}

	spin_unlock_irqrestore(slock, flags);

	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
		chan_hdl, info->rp, info->wp,
		info->evt_valid, info->evt_rp, info->evt_wp);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_info);
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	struct gsi_chan_ctx *ctx;
	struct gsi_evt_ctx *ev_ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	uint64_t rp_local;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
		GSIERR("bad params chan_hdl=%lu is_empty=%pK\n",
			chan_hdl, is_empty);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr) {
		ev_ctx = &gsi_ctx->evtr[ctx->evtr->id];
		/* Read the event ring rp from DDR to avoid mismatch */
		rp = ev_ctx->props.gsi_read_event_ring_rp(&ev_ctx->props,
			ev_ctx->id, ee);
		rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
		ctx->evtr->ring.rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_6,
			ee, ctx->evtr->id);
		wp |= ctx->evtr->ring.wp & GSI_MSB_MASK;
		ctx->evtr->ring.wp = wp;

		rp_local = ctx->evtr->ring.rp_local;
	} else {
		rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
			ee, ctx->props.ch_id);
		rp |= ctx->ring.rp & GSI_MSB_MASK;
		ctx->ring.rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
			ee, ctx->props.ch_id);
		wp |= ctx->ring.wp & GSI_MSB_MASK;
		ctx->ring.wp = wp;

		rp_local = ctx->ring.rp_local;
	}

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		*is_empty = (rp_local == rp) ? true : false;
	else
		*is_empty = (wp == rp) ? true : false;

	spin_unlock_irqrestore(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr)
		GSIDBG("ch=%ld ev=%d RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, ctx->evtr->id, rp, wp, rp_local);
	else
		GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, rp, wp, rp_local);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_is_channel_empty);
bool gsi_is_event_pending(unsigned long chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	uint64_t rp;
	uint64_t rp_local;
	int ee;

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return false;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	/* read only, updating will be handled in NAPI context if needed */
	rp = ctx->evtr->props.gsi_read_event_ring_rp(
		&ctx->evtr->props, ctx->evtr->id, ee);
	rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
	rp_local = ctx->evtr->ring.rp_local;

	return rp != rp_local;
}
EXPORT_SYMBOL(gsi_is_event_pending);
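
/*
 * __gsi_get_gci_cookie() - pick a free user_data slot for a GCI TRE. The
 * natural slot is the TRE index; if it is still in use (out-of-order
 * completion), fall back to the escape buffer past the ring, and finally
 * scan the whole array. All slots taken means the ring has likely stalled.
 */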
int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
{
	int i;
	int end;

	if (!ctx->user_data[idx].valid) {
		ctx->user_data[idx].valid = true;
		return idx;
	}

	/*
	 * at this point we need to find an "escape buffer" for the cookie
	 * as the userdata in this spot is in use. This happens if the TRE at
	 * idx is not completed yet and it is getting reused by a new TRE.
	 */
	ctx->stats.userdata_in_use++;
	end = ctx->ring.max_num_elem + 1;
	for (i = 0; i < GSI_VEID_MAX; i++) {
		if (!ctx->user_data[end + i].valid) {
			ctx->user_data[end + i].valid = true;
			return end + i;
		}
	}

	/* Go over original userdata when escape buffer is full (costly) */
	GSIDBG("escape buffer is full\n");
	for (i = 0; i < end; i++) {
		if (!ctx->user_data[i].valid) {
			ctx->user_data[i].valid = true;
			return i;
		}
	}

	/* Everything is full (possibly a stall) */
	GSIERR("both userdata array and escape buffer are full\n");
	BUG();
	return 0xFFFF;
}
int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
		struct gsi_xfer_elem *xfer)
{
	struct gsi_gci_tre gci_tre;
	struct gsi_gci_tre *tre_gci_ptr;
	uint16_t idx;

	memset(&gci_tre, 0, sizeof(gci_tre));
	if (xfer->addr & 0xFFFFFF0000000000) {
		GSIERR("chan_hdl=%u addr too large=%llx\n",
			ctx->props.ch_id, xfer->addr);
		return -EINVAL;
	}

	if (xfer->type != GSI_XFER_ELEM_DATA) {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_gci_ptr = (struct gsi_gci_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	gci_tre.buffer_ptr = xfer->addr;
	gci_tre.buf_len = xfer->len;
	gci_tre.re_type = GSI_RE_COAL;
	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
		return -EPERM;

	/* write the TRE to ring */
	*tre_gci_ptr = gci_tre;
	ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;

	return 0;
}
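
/*
 * __gsi_populate_tre() - build a plain GPI TRE from a gsi_xfer_elem (type,
 * interrupt flags, chaining) at the local write pointer and record the
 * caller cookie in user_data at the same index.
 */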
int __gsi_populate_tre(struct gsi_chan_ctx *ctx,
		struct gsi_xfer_elem *xfer)
{
	struct gsi_tre tre;
	struct gsi_tre *tre_ptr;
	uint16_t idx;

	memset(&tre, 0, sizeof(tre));
	tre.buffer_ptr = xfer->addr;
	tre.buf_len = xfer->len;
	if (xfer->type == GSI_XFER_ELEM_DATA) {
		tre.re_type = GSI_RE_XFER;
	} else if (xfer->type == GSI_XFER_ELEM_IMME_CMD) {
		tre.re_type = GSI_RE_IMMD_CMD;
	} else if (xfer->type == GSI_XFER_ELEM_NOP) {
		tre.re_type = GSI_RE_NOP;
	} else {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	tre.bei = (xfer->flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
	tre.ieot = (xfer->flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
	tre.ieob = (xfer->flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
	tre.chain = (xfer->flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;

	if (unlikely(ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	/* write the TRE to ring */
	*tre_ptr = tre;
	ctx->user_data[idx].valid = true;
	ctx->user_data[idx].p = xfer->xfer_user_data;

	return 0;
}
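
/*
 * gsi_queue_xfer() - queue num_xfers transfer elements on a GPI/GCI
 * channel and optionally ring the doorbell. The batch is all-or-nothing:
 * if any element fails to populate, the local write pointer is rolled back
 * and nothing is queued.
 */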
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	struct gsi_chan_ctx *ctx;
	uint16_t free;
	uint64_t wp_rollback;
	int i;
	spinlock_t *slock;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
			chan_hdl, num_xfers, xfer);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (unlikely(gsi_ctx->chan[chan_hdl].state
			== GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	/* with num_xfers == 0, only ring the doorbell */
	if (!num_xfers)
		goto ring_doorbell;

	/*
	 * for GCI channels the responsibility is on the caller to make sure
	 * there is enough room in the TRE ring.
	 */
	if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
		__gsi_query_channel_free_re(ctx, &free);
		if (num_xfers > free) {
			GSIERR_RL("chan_hdl=%lu num_xfers=%u free=%u\n",
				chan_hdl, num_xfers, free);
			spin_unlock_irqrestore(slock, flags);
			return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
		}
	}

	wp_rollback = ctx->ring.wp_local;
	for (i = 0; i < num_xfers; i++) {
		if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
			if (__gsi_populate_gci_tre(ctx, &xfer[i]))
				break;
		} else {
			if (__gsi_populate_tre(ctx, &xfer[i]))
				break;
		}
		gsi_incr_ring_wp(&ctx->ring);
	}

	if (i != num_xfers) {
		/* reject all the xfers */
		ctx->ring.wp_local = wp_rollback;
		spin_unlock_irqrestore(slock, flags);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx->stats.queued += num_xfers;

ring_doorbell:
	if (ring_db) {
		/* ensure TRE is set before ringing doorbell */
		wmb();
		gsi_ring_chan_doorbell(ctx);
	}

	spin_unlock_irqrestore(slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_queue_xfer);
int gsi_start_xfer(unsigned long chan_hdl)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->ring.wp == ctx->ring.wp_local)
		return GSI_STATUS_SUCCESS;

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_xfer);

int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify)
{
	int unused_var;

	return gsi_poll_n_channel(chan_hdl, notify, 1, &unused_var);
}
EXPORT_SYMBOL(gsi_poll_channel);
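
/*
 * gsi_poll_n_channel() - poll up to expected_num completed events from the
 * channel's event ring. When the cached read pointer shows no work, the
 * hardware pointer is re-read (and the IEOB status cleared, then checked
 * once more to close the race window) before declaring the ring empty.
 */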
int gsi_poll_n_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify,
		int expected_num, int *actual_num)
{
	struct gsi_chan_ctx *ctx;
	uint64_t rp;
	int ee;
	int i;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !notify ||
		!actual_num || expected_num <= 0) {
		GSIERR("bad params chan_hdl=%lu notify=%pK\n",
			chan_hdl, notify);
		GSIERR("actual_num=%pK expected_num=%d\n",
			actual_num, expected_num);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* Before polling, make sure the channel is in allocated state */
	if (unlikely(ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr) {
		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
		/* update rp to see if we have anything new to process */
		rp = ctx->evtr->props.gsi_read_event_ring_rp(
			&ctx->evtr->props, ctx->evtr->id, ee);
		rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
		ctx->evtr->ring.rp = rp;

		/* read gsi event ring rp again if last read is empty */
		if (rp == ctx->evtr->ring.rp_local) {
			/* event ring is empty */
			if (gsi_ctx->per.ver >= GSI_VER_3_0) {
				gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
					ee, gsihal_get_ch_reg_idx(ctx->evtr->id),
					gsihal_get_ch_reg_mask(ctx->evtr->id));
			} else {
				gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
					ee, 1 << ctx->evtr->id);
			}
			/* do another read to close a small window */
			__iowmb();
			rp = ctx->evtr->props.gsi_read_event_ring_rp(
				&ctx->evtr->props, ctx->evtr->id, ee);
			rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
			ctx->evtr->ring.rp = rp;
			if (rp == ctx->evtr->ring.rp_local) {
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock,
					flags);
				ctx->stats.poll_empty++;
				return GSI_STATUS_POLL_EMPTY;
			}
		}
	}

	*actual_num = gsi_get_complete_num(&ctx->evtr->ring,
		ctx->evtr->ring.rp_local, ctx->evtr->ring.rp);

	if (*actual_num > expected_num)
		*actual_num = expected_num;

	for (i = 0; i < *actual_num; i++)
		gsi_process_evt_re(ctx->evtr, notify + i, false);

	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
	ctx->stats.poll_ok++;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_poll_n_channel);
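
/*
 * gsi_config_channel_mode() - switch a channel (and every channel sharing
 * its event ring) between callback (IRQ) and poll mode by masking or
 * unmasking the IEOB interrupt. On GSI 2.2/2.5 an extra check for an
 * already-pending IEOB is needed after re-enabling the interrupt to avoid
 * losing it; in that case the channel is put back into poll mode and
 * -GSI_STATUS_PENDING_IRQ is returned.
 */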
  3794. int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
  3795. {
  3796. struct gsi_chan_ctx *ctx, *coal_ctx;
  3797. enum gsi_chan_mode curr;
  3798. unsigned long flags;
  3799. enum gsi_chan_mode chan_mode;
  3800. int i;
  3801. if (!gsi_ctx) {
  3802. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  3803. return -GSI_STATUS_NODEV;
  3804. }
  3805. if (chan_hdl >= gsi_ctx->max_ch) {
  3806. GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
  3807. return -GSI_STATUS_INVALID_PARAMS;
  3808. }
  3809. ctx = &gsi_ctx->chan[chan_hdl];
  3810. if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
  3811. ctx->props.prot != GSI_CHAN_PROT_GCI) {
  3812. GSIERR("op not supported for protocol %u\n", ctx->props.prot);
  3813. return -GSI_STATUS_UNSUPPORTED_OP;
  3814. }
  3815. if (!ctx->evtr) {
  3816. GSIERR("cannot configure mode on chan_hdl=%lu\n",
  3817. chan_hdl);
  3818. return -GSI_STATUS_UNSUPPORTED_OP;
  3819. }
  3820. if (atomic_read(&ctx->poll_mode))
  3821. curr = GSI_CHAN_MODE_POLL;
  3822. else
  3823. curr = GSI_CHAN_MODE_CALLBACK;
  3824. if (mode == curr) {
  3825. GSIDBG("already in requested mode %u chan_hdl=%lu\n",
  3826. curr, chan_hdl);
  3827. return -GSI_STATUS_UNSUPPORTED_OP;
  3828. }
  3829. spin_lock_irqsave(&gsi_ctx->slock, flags);
  3830. if (curr == GSI_CHAN_MODE_CALLBACK &&
  3831. mode == GSI_CHAN_MODE_POLL) {
  3832. if (gsi_ctx->per.ver >= GSI_VER_3_0) {
  3833. if (ctx->evtr->props.intr != GSI_INTR_MSI) {
  3834. __gsi_config_ieob_irq_k(gsi_ctx->per.ee,
  3835. gsihal_get_ch_reg_idx(ctx->evtr->id),
  3836. gsihal_get_ch_reg_mask(ctx->evtr->id),
  3837. 0);
  3838. }
  3839. }
  3840. else {
  3841. __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
  3842. }
  3843. if (gsi_ctx->per.ver >= GSI_VER_3_0) {
  3844. gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
  3845. gsi_ctx->per.ee, gsihal_get_ch_reg_idx(ctx->evtr->id),
  3846. gsihal_get_ch_reg_mask(ctx->evtr->id));
  3847. }
  3848. else {
  3849. gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
  3850. gsi_ctx->per.ee, 1 << ctx->evtr->id);
  3851. }
  3852. atomic_set(&ctx->poll_mode, mode);
  3853. for(i = 0; i < ctx->evtr->num_of_chan_allocated; i++) {
  3854. atomic_set(&ctx->evtr->chan[i]->poll_mode, mode);
  3855. }
  3856. if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && *ctx->evtr->chan) {
  3857. atomic_set(&ctx->evtr->chan[0]->poll_mode, mode);
  3858. } else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
  3859. coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
  3860. if (coal_ctx != NULL)
  3861. atomic_set(&coal_ctx->poll_mode, mode);
  3862. }
  3863. GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
  3864. ctx->evtr->id, mode);
  3865. ctx->stats.callback_to_poll++;
  3866. }
  3867. if (curr == GSI_CHAN_MODE_POLL &&
  3868. mode == GSI_CHAN_MODE_CALLBACK) {
  3869. atomic_set(&ctx->poll_mode, mode);
  3870. for(i = 0; i < ctx->evtr->num_of_chan_allocated; i++) {
  3871. atomic_set(&ctx->evtr->chan[i]->poll_mode, mode);
  3872. }
  3873. if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && *ctx->evtr->chan) {
  3874. atomic_set(&ctx->evtr->chan[0]->poll_mode, mode);
  3875. } else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
  3876. coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
  3877. if (coal_ctx != NULL)
  3878. atomic_set(&coal_ctx->poll_mode, mode);
  3879. }
  3880. if (gsi_ctx->per.ver >= GSI_VER_3_0) {
  3881. if (ctx->evtr->props.intr != GSI_INTR_MSI) {
  3882. __gsi_config_ieob_irq_k(gsi_ctx->per.ee,
  3883. gsihal_get_ch_reg_idx(ctx->evtr->id),
  3884. gsihal_get_ch_reg_mask(ctx->evtr->id),
  3885. ~0);
  3886. }
  3887. }
  3888. else {
  3889. __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
  3890. }
  3891. GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
  3892. ctx->evtr->id, mode);
  3893. /*
  3894. * In GSI 2.2 and 2.5 there is a limitation that can lead
  3895. * to losing an interrupt. For these versions an
  3896. * explicit check is needed after enabling the interrupt
  3897. */
  3898. if ((gsi_ctx->per.ver == GSI_VER_2_2 ||
  3899. gsi_ctx->per.ver == GSI_VER_2_5) &&
  3900. !gsi_ctx->per.skip_ieob_mask_wa) {
  3901. u32 src = gsihal_read_reg_n(
  3902. GSI_EE_n_CNTXT_SRC_IEOB_IRQ,
  3903. gsi_ctx->per.ee);
  3904. if (src & (1 << ctx->evtr->id)) {
  3905. if (gsi_ctx->per.ver >= GSI_VER_3_0) {
  3906. __gsi_config_ieob_irq_k(gsi_ctx->per.ee,
  3907. gsihal_get_ch_reg_idx(ctx->evtr->id),
  3908. gsihal_get_ch_reg_mask(ctx->evtr->id),
  3909. 0);
  3910. gsihal_write_reg_nk(
  3911. GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
  3912. gsi_ctx->per.ee,
  3913. gsihal_get_ch_reg_idx(ctx->evtr->id),
  3914. gsihal_get_ch_reg_mask(ctx->evtr->id));
  3915. }
  3916. else {
  3917. __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 <<
  3918. ctx->evtr->id, 0);
  3919. gsihal_write_reg_n(
  3920. GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
  3921. gsi_ctx->per.ee,
  3922. 1 << ctx->evtr->id);
  3923. }
  3924. spin_unlock_irqrestore(&gsi_ctx->slock, flags);
  3925. spin_lock_irqsave(&ctx->evtr->ring.slock,
  3926. flags);
  3927. chan_mode = atomic_xchg(&ctx->poll_mode,
  3928. GSI_CHAN_MODE_POLL);
  3929. spin_unlock_irqrestore(
  3930. &ctx->evtr->ring.slock, flags);
  3931. ctx->stats.poll_pending_irq++;
  3932. GSIDBG("IEOB WA pnd cnt = %ld prvmode = %d\n",
  3933. ctx->stats.poll_pending_irq,
  3934. chan_mode);
  3935. if (chan_mode == GSI_CHAN_MODE_POLL)
  3936. return GSI_STATUS_SUCCESS;
  3937. else
  3938. return -GSI_STATUS_PENDING_IRQ;
  3939. }
  3940. }
  3941. ctx->stats.poll_to_callback++;
  3942. }
  3943. spin_unlock_irqrestore(&gsi_ctx->slock, flags);
  3944. return GSI_STATUS_SUCCESS;
  3945. }
  3946. EXPORT_SYMBOL(gsi_config_channel_mode);
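/*
 * Illustrative sketch (not built): one way a client might bounce a GPI
 * channel between callback and poll mode around gsi_config_channel_mode().
 * The client_poll_cycle() helper and the rescheduling policy are
 * hypothetical; only the GSI calls and status codes come from this file.
 */
#if 0
static void client_poll_cycle(unsigned long chan_hdl)
{
	struct gsi_chan_xfer_notify notify;
	int actual = 0;
	int ret;

	/* on the first IEOB callback, switch the channel to poll mode */
	gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_POLL);

	/* drain completed transfers while polling */
	do {
		ret = gsi_poll_n_channel(chan_hdl, &notify, 1, &actual);
	} while (ret == GSI_STATUS_SUCCESS && actual);

	/*
	 * Re-arm the interrupt. On GSI 2.2/2.5 the IEOB workaround above may
	 * report that an interrupt fired while masked; in that case stay in
	 * poll mode and run another cycle instead of waiting for a callback.
	 */
	ret = gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_CALLBACK);
	if (ret == -GSI_STATUS_PENDING_IRQ)
		client_poll_cycle(chan_hdl);	/* hypothetical reschedule */
}
#endif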
int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_channel_cfg);
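/*
 * Note on gsi_set_channel_cfg() below: it is only valid while the channel
 * is in the ALLOCATED state, ch_id and evt_ring_hdl are immutable, and
 * passing scr == NULL keeps the previously cached scratch, which is
 * re-written afterwards because reprogramming the channel context can
 * clobber the HW scratch area.
 */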
int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_channel_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.ch_id != props->ch_id ||
	    ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_set_channel_cfg);
static void gsi_configure_ieps(enum gsi_ver ver)
{
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_CMD, 1);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_DB, 2);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_DIS_COMP, 3);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_EMPTY, 4);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EE_GENERIC_CMD, 5);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EVENT_GEN_COMP, 6);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_INT_MOD_STOPPED, 7);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0, 8);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2, 9);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1, 10);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_NEW_RE, 11);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_READ_ENG_COMP, 12);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_TIMER_EXPIRED, 13);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EV_DB, 14);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_UC_GP_INT, 15);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_WRITE_ENG_COMP, 16);

	if (ver >= GSI_VER_2_5)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_TLV_CH_NOT_FULL, 17);
	if (ver >= GSI_VER_2_11)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_MSI_DB, 18);
	if (ver >= GSI_VER_3_0)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_INT_NOTIFY_MCS, 19);
}
static void gsi_configure_bck_prs_matrix(void)
{
	/*
	 * For now, these are default values. In the future, GSI FW image will
	 * produce optimized back-pressure values based on the FW image.
	 */
	gsihal_write_reg(GSI_IC_DISABLE_CHNL_BCK_PRS_LSB, 0xfffffffe);
	gsihal_write_reg(GSI_IC_DISABLE_CHNL_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_GEN_EVNT_BCK_PRS_LSB, 0xffffffbf);
	gsihal_write_reg(GSI_IC_GEN_EVNT_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_GEN_INT_BCK_PRS_LSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_GEN_INT_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_STOP_INT_MOD_BCK_PRS_LSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_STOP_INT_MOD_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_PROCESS_DESC_BCK_PRS_LSB, 0x00000000);
	gsihal_write_reg(GSI_IC_PROCESS_DESC_BCK_PRS_MSB, 0x00000000);
	gsihal_write_reg(GSI_IC_TLV_STOP_BCK_PRS_LSB, 0xf9ffffff);
	gsihal_write_reg(GSI_IC_TLV_STOP_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_TLV_RESET_BCK_PRS_LSB, 0xf9ffffff);
	gsihal_write_reg(GSI_IC_TLV_RESET_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_RGSTR_TIMER_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_RGSTR_TIMER_BCK_PRS_MSB, 0xfffffffe);
	gsihal_write_reg(GSI_IC_READ_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_READ_BCK_PRS_MSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_WRITE_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_WRITE_BCK_PRS_MSB, 0xffffdfff);
	gsihal_write_reg(GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB, 0xff03ffff);
}
int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	gsihal_write_reg(GSI_GSI_PERIPH_BASE_ADDR_MSB, 0);
	gsihal_write_reg(GSI_GSI_PERIPH_BASE_ADDR_LSB, per_base_addr);
	gsi_configure_bck_prs_matrix();
	gsi_configure_ieps(ver);

	return 0;
}
EXPORT_SYMBOL(gsi_configure_regs);
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	struct gsihal_reg_gsi_cfg gsi_cfg;

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	/* Enable the MCS and set to x2 clocks */
	gsi_cfg.gsi_enable = 1;
	gsi_cfg.double_mcs_clk_freq = 1;
	gsi_cfg.uc_is_mcs = 0;
	gsi_cfg.gsi_pwr_clps = 0;
	gsi_cfg.bp_mtrix_disable = 0;
	if (ver >= GSI_VER_1_2) {
		/* GSI 1.2+ enables the MCS via the dedicated GSI_GSI_MCS_CFG
		 * register rather than the legacy bit in GSI_CFG
		 */
		gsihal_write_reg(GSI_GSI_MCS_CFG, 1);
		gsi_cfg.mcs_enable = 0;
	} else {
		gsi_cfg.mcs_enable = 1;
	}

	/* GSI frequency is peripheral frequency divided by 3 (2+1) */
	if (ver >= GSI_VER_2_5)
		gsi_cfg.sleep_clk_div = 2;
	gsihal_write_reg_fields(GSI_GSI_CFG, &gsi_cfg);

	return 0;
}
EXPORT_SYMBOL(gsi_enable_fw);
void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
	unsigned long *size, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (size)
		*size = gsihal_get_inst_ram_size();

	if (base_offset)
		*base_offset = gsihal_get_reg_n_ofst(GSI_GSI_INST_RAM_n, 0);
}
EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
/*
 * Dump the debug registers used for diagnosing halt-command issues.
 */
static void gsi_dump_halt_debug_reg(unsigned int chan_idx, unsigned int ee)
{
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;

	GSIERR("DEBUG_PC_FOR_DEBUG = 0x%x\n",
		gsihal_read_reg(GSI_EE_n_GSI_DEBUG_PC_FOR_DEBUG));
	GSIERR("GSI_DEBUG_BUSY_REG 0x%x\n",
		gsihal_read_reg(GSI_EE_n_GSI_DEBUG_BUSY_REG));
	GSIERR("GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS = 0x%x\n",
		gsihal_read_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_EN, gsi_ctx->per.ee));
	GSIERR("GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS IRQ type = 0x%x\n",
		gsihal_read_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_STTS, gsi_ctx->per.ee));
	GSIERR("GSI_EE_n_CNTXT_SCRATCH_0_OFFS = 0x%x\n",
		gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0, gsi_ctx->per.ee));

	if (gsi_ctx->per.ver >= GSI_VER_2_9)
		GSIERR("GSI_EE_n_GSI_CH_k_SCRATCH_4 = 0x%x\n",
			gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_4,
				ee, chan_idx));

	gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0, ee, chan_idx,
		&ch_k_cntxt_0);
	GSIERR("Q6 channel [%d] state = %d\n", chan_idx, ch_k_cntxt_0.chstate);
}
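/*
 * Generic-EE command handshake, shared by the halt/alloc/flow-control
 * helpers below: unmask the GP_INT1 global interrupt, zero the return-code
 * field in GSI_EE_n_CNTXT_SCRATCH_0 so a stale value cannot be mistaken for
 * a response, write the command to GSI_EE_n_GSI_EE_GENERIC_CMD, wait on
 * gen_ee_cmd_compl, then read the return code back from SCRATCH_0. A return
 * code of 0 after the wait means the FW never responded.
 */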
int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(
		GSI_EE_n_CNTXT_SCRATCH_0, gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD,
		gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
	    GSI_GEN_EE_CMD_RETURN_CODE_RETRY) {
		GSIDBG("chan_idx=%u ee=%u busy try again\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_RETRY;
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		gsi_dump_halt_debug_reg(chan_idx, ee);
		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
		GSIERR("Re-reading scratch 0 register after delay\n");
		gsi_ctx->scratch.word0.val = gsihal_read_reg_n(
			GSI_EE_n_CNTXT_SCRATCH_0, gsi_ctx->per.ee);
		if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
			GSIERR("No response received on second attempt\n");
			gsi_dump_halt_debug_reg(chan_idx, ee);
			res = -GSI_STATUS_ERROR;
			goto free_lock;
		}
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_halt_channel_ee);
int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL;
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	/* the AP (EE 0) allocates its own channels locally */
	if (ee == 0)
		return gsi_alloc_ap_channel(chan_idx);

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD,
		gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
	    GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) {
		GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	}

	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_alloc_channel_ee);
int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
	int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD,
		gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
	    GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
		GSIDBG("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
			GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE ||
		gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
			GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX) {
		GSIERR("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		GSI_ASSERT();
	}

	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	/* read the current channel state */
	gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_idx, &ch_k_cntxt_0);
	curr_state = ch_k_cntxt_0.chstate;
	if (curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
		GSIDBG("ch %u state updated to %u\n", chan_idx, curr_state);
		res = GSI_STATUS_SUCCESS;
	} else {
		GSIERR("ch %u state updated to %u incorrect state\n",
			chan_idx, curr_state);
		res = -GSI_STATUS_ERROR;
	}
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_enable_flow_control_ee);
int gsi_flow_control_ee(unsigned int chan_idx, int ep_id, unsigned int ee,
	bool enable, bool prmy_scnd_fc, int *code)
{
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	enum gsi_generic_ee_cmd_opcode op = enable ?
		GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL :
		GSI_GEN_EE_CMD_DISABLE_FLOW_CHANNEL;
	int res;
	int wait_due_pending = 0;
	uint32_t fc_pending = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	GSIDBG("GSI flow control opcode=%d, ch_id=%d\n", op, chan_idx);

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	cmd.prmy_scnd_fc = prmy_scnd_fc;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD,
		gsi_ctx->per.ee, &cmd);

wait_again:
	fc_pending = gsihal_read_reg_n(GSI_GSI_SHRAM_n,
		(ep_id * GSI_FC_NUM_WORDS_PER_CHNL_SHRAM) +
		GSI_FC_STATE_INDEX_SHRAM) & GSI_FC_PENDING_MASK;
	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_FC_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		if (op == GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL &&
		    wait_due_pending < GSI_FC_MAX_TIMEOUT && fc_pending) {
			wait_due_pending++;
			goto wait_again;
		}
		GSIERR("GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS = 0x%x\n",
			gsihal_read_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_EN,
				gsi_ctx->per.ee));
		GSIERR("GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS IRQ type = 0x%x\n",
			gsihal_read_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_STTS,
				gsi_ctx->per.ee));
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	GSIDBG(
		"Flow control command response GENERIC_CMD_RESPONSE_CODE = %u, val = %u\n",
		gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code,
		gsi_ctx->scratch.word0.val);

	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
	    GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
		GSIDBG("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE) {
		GSIERR("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		GSI_ASSERT();
	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX) {
		GSIERR("Channel ID = %u ee = %u not allocated\n", chan_idx, ee);
	}

	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		GSI_ASSERT();
		goto free_lock;
	}

	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
	res = GSI_STATUS_SUCCESS;
free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_flow_control_ee);
int gsi_query_flow_control_state_ee(unsigned int chan_idx, unsigned int ee,
	bool prmy_scnd_fc, int *code)
{
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_QUERY_FLOW_CHANNEL;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	cmd.prmy_scnd_fc = prmy_scnd_fc;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD,
		gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_val;

	if (prmy_scnd_fc)
		res = (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_val ==
			GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_SECONDARY) ?
			GSI_STATUS_SUCCESS : -GSI_STATUS_ERROR;
	else
		res = (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_val ==
			GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_PRIMARY) ?
			GSI_STATUS_SUCCESS : -GSI_STATUS_ERROR;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_query_flow_control_state_ee);
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	gsihal_write_reg_nk(GSI_MAP_EE_n_CH_k_VP_TABLE,
		ee, chan_num, per_ep_index);

	return 0;
}
EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep);
void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
	uint32_t db_addr_low, uint32_t db_addr_high)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_10,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_low);
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_11,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_high);
	} else {
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_12,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_low);
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_13,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_high);
	}
}
EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp)
{
	if (is_rp)
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
			gsi_ctx->per.ee, chan_hdl);

	return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
		gsi_ctx->per.ee, chan_hdl);
}
EXPORT_SYMBOL(gsi_get_refetch_reg);
/*
 * ; +------------------------------------------------------+
 * ; |                NTN3 Rx Channel Scratch                |
 * ; +-------------+--------------------------------+-------+
 * ; | 32-bit word | Field                          | Bits  |
 * ; +-------------+--------------------------------+-------+
 * ; | 4           | NTN_PENDING_DB_AFTER_ROLLBACK  | 18-18 |
 * ; +-------------+--------------------------------+-------+
 * ; | 5           | NTN_MSI_DB_INDEX_VALUE         | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | 6           | NTN_RX_CHAIN_COUNTER           | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | 7           | NTN_RX_ERR_COUNTER             | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | 8           | NTN_ACCUMULATED_TRES_HANDLED   | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | 9           | NTN_ROLLBACKS_COUNTER          | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | FOR_SEQ_HIGH| NTN_MSI_DB_COUNT               | 0-31  |
 * ; +-------------+--------------------------------+-------+
 *
 * ; +------------------------------------------------------+
 * ; |                NTN3 Tx Channel Scratch                |
 * ; +-------------+--------------------------------+-------+
 * ; | 32-bit word | Field                          | Bits  |
 * ; +-------------+--------------------------------+-------+
 * ; | 4           | NTN_PENDING_DB_AFTER_ROLLBACK  | 18-18 |
 * ; +-------------+--------------------------------+-------+
 * ; | 5           | NTN_MSI_DB_INDEX_VALUE         | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | 6           | TX_DERR_COUNTER                | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | 7           | NTN_TX_OOB_COUNTER             | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | 8           | NTN_ACCUMULATED_TRES_HANDLED   | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | 9           | NTN_ROLLBACKS_COUNTER          | 0-31  |
 * ; +-------------+--------------------------------+-------+
 * ; | FOR_SEQ_HIGH| NTN_MSI_DB_COUNT               | 0-31  |
 * ; +-------------+--------------------------------+-------+
 */
int gsi_ntn3_client_stats_get(unsigned ep_id, int scratch_id, unsigned chan_hdl)
{
	switch (scratch_id) {
	case -1:
		return gsihal_read_reg_n(GSI_GSI_SHRAM_n,
			GSI_GSI_SHRAM_n_EP_FOR_SEQ_HIGH_N_GET(ep_id));
	case 4:
		/* single bit (18) per the layout above: shift down, then mask */
		return (gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_4,
			gsi_ctx->per.ee, chan_hdl) >>
			GSI_NTN3_PENDING_DB_AFTER_RB_SHIFT) &
			GSI_NTN3_PENDING_DB_AFTER_RB_MASK;
	case 5:
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_5,
			gsi_ctx->per.ee, chan_hdl);
	case 6:
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_6,
			gsi_ctx->per.ee, chan_hdl);
	case 7:
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_7,
			gsi_ctx->per.ee, chan_hdl);
	case 8:
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_8,
			gsi_ctx->per.ee, chan_hdl);
	case 9:
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_9,
			gsi_ctx->per.ee, chan_hdl);
	default:
		GSIERR("invalid scratch id %d\n", scratch_id);
		return 0;
	}
}
EXPORT_SYMBOL(gsi_ntn3_client_stats_get);
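/*
 * Reading the tables above together with gsi_ntn3_client_stats_get(): for
 * example, scratch_id 7 returns NTN_RX_ERR_COUNTER on an Rx channel and
 * NTN_TX_OOB_COUNTER on a Tx channel, while scratch_id -1 selects the
 * FOR_SEQ_HIGH word (NTN_MSI_DB_COUNT), which is fetched from SHRAM by
 * ep_id rather than from the per-channel scratch registers.
 */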
int gsi_get_drop_stats(unsigned long ep_id, int scratch_id,
	unsigned long chan_hdl)
{
#define GSI_RTK_ERR_STATS_MASK 0xFFFF
#define GSI_NTN_ERR_STATS_MASK 0xFFFFFFFF
#define GSI_AQC_RX_STATUS_MASK 0x1FFF
#define GSI_AQC_RX_STATUS_SHIFT 0
#define GSI_AQC_RDM_ERR_MASK 0x1FFF0000
#define GSI_AQC_RDM_ERR_SHIFT 16

	uint16_t rx_status;
	uint16_t rdm_err;
	uint32_t val;

	/* on newer versions we can read the ch scratch directly from reg */
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		switch (scratch_id) {
		case 5:
			return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_5,
				gsi_ctx->per.ee, chan_hdl) &
				GSI_RTK_ERR_STATS_MASK;
		case 6:
			return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_6,
				gsi_ctx->per.ee, chan_hdl) &
				GSI_NTN_ERR_STATS_MASK;
		case 7:
			val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_7,
				gsi_ctx->per.ee, chan_hdl);
			rx_status = (val & GSI_AQC_RX_STATUS_MASK)
				>> GSI_AQC_RX_STATUS_SHIFT;
			rdm_err = (val & GSI_AQC_RDM_ERR_MASK)
				>> GSI_AQC_RDM_ERR_SHIFT;
			return rx_status + rdm_err;
		default:
			GSIERR("invalid scratch id %d\n", scratch_id);
			return 0;
		}
	/* on older versions we need to read the scratch from SHRAM */
	} else {
		/* RTK uses scratch 5 */
		if (scratch_id == 5) {
			/*
			 * Each channel context is 6 lines of 8 bytes, but n in
			 * SHRAM_n is in 4-byte offsets, so multiplying ep_id
			 * by 6*2 = 12 gives the beginning of the required
			 * channel context. Then add 7: the channel context
			 * layout is the ring rbase (8 bytes) followed by
			 * channel scratch 0-4 (20 bytes), so an additional
			 * 28/4 = 7 words reaches scratch 5 of the required
			 * channel.
			 */
			return gsihal_read_reg_n(GSI_GSI_SHRAM_n,
				ep_id * 12 + 7) & GSI_RTK_ERR_STATS_MASK;
		}
	}

	return 0;
}
EXPORT_SYMBOL(gsi_get_drop_stats);
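/*
 * Worked example of the SHRAM offset arithmetic in gsi_get_drop_stats():
 * a channel context is 6 lines * 8 bytes = 48 bytes = 12 SHRAM words, and
 * scratch 5 sits 28 bytes (7 words) past the context base, so for ep_id 3
 * the read lands on SHRAM word 3 * 12 + 7 = 43.
 */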
int gsi_get_wp(unsigned long chan_hdl)
{
	return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6, gsi_ctx->per.ee,
		chan_hdl);
}
EXPORT_SYMBOL(gsi_get_wp);
void gsi_wdi3_dump_register(unsigned long chan_hdl)
{
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	GSIDBG("reg dump ch id %lu\n", chan_hdl);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_0 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_1 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_2 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_3 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_4 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_5 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_6 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_7 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_QOS,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_QOS 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_0 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_1 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_2 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_3 0x%x\n", val);
}
EXPORT_SYMBOL(gsi_wdi3_dump_register);
int gsi_query_msi_addr(unsigned long chan_hdl, phys_addr_t *addr)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*addr = (phys_addr_t)(gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_CNTXT_8,
			gsi_ctx->per.ee, chan_hdl));

	return 0;
}
EXPORT_SYMBOL(gsi_query_msi_addr);
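/*
 * The address reported by gsi_query_msi_addr() is the physical address of
 * the channel's GSI_EE_n_GSI_CH_k_CNTXT_8 register; the apparent intent,
 * as far as this code shows, is that a peripheral programmed with it has
 * its MSI write land directly in the channel context, letting the MSI
 * double as a doorbell.
 */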
int gsi_query_device_msi_addr(u64 *addr)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (gsi_ctx->msi_addr_set)
		*addr = gsi_ctx->msi_addr;
	else
		*addr = 0;

	GSIDBG("Device MSI Addr: 0x%llx\n", *addr);
	return 0;
}
EXPORT_SYMBOL(gsi_query_device_msi_addr);
uint64_t gsi_read_event_ring_wp(int evtr_id, int ee)
{
	uint64_t wp;

	wp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_6, ee, evtr_id);
	wp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_7,
		ee, evtr_id)) << 32;

	return wp;
}
EXPORT_SYMBOL(gsi_read_event_ring_wp);

uint64_t gsi_read_event_ring_bp(int evt_hdl)
{
	return gsi_ctx->evtr[evt_hdl].ring.base;
}
EXPORT_SYMBOL(gsi_read_event_ring_bp);

uint64_t gsi_get_evt_ring_rp(int evt_hdl)
{
	return gsi_ctx->evtr[evt_hdl].props.gsi_read_event_ring_rp(
		&gsi_ctx->evtr[evt_hdl].props, evt_hdl, gsi_ctx->per.ee);
}
EXPORT_SYMBOL(gsi_get_evt_ring_rp);

uint64_t gsi_read_chan_ring_rp(int chan_id, int ee)
{
	uint64_t rp;

	rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4, ee, chan_id);
	rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		ee, chan_id)) << 32;

	return rp;
}
EXPORT_SYMBOL(gsi_read_chan_ring_rp);

uint64_t gsi_read_chan_ring_wp(int chan_id, int ee)
{
	uint64_t wp;

	wp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6, ee, chan_id);
	wp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		ee, chan_id)) << 32;

	return wp;
}
EXPORT_SYMBOL(gsi_read_chan_ring_wp);

uint64_t gsi_read_chan_ring_bp(int chan_hdl)
{
	return gsi_ctx->chan[chan_hdl].ring.base;
}
EXPORT_SYMBOL(gsi_read_chan_ring_bp);

uint64_t gsi_read_chan_ring_re_fetch_wp(int chan_id, int ee)
{
	return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
		ee, chan_id);
}
EXPORT_SYMBOL(gsi_read_chan_ring_re_fetch_wp);

enum gsi_chan_prot gsi_get_chan_prot_type(int chan_hdl)
{
	return gsi_ctx->chan[chan_hdl].props.prot;
}
EXPORT_SYMBOL(gsi_get_chan_prot_type);

enum gsi_chan_state gsi_get_chan_state(int chan_hdl)
{
	return gsi_ctx->chan[chan_hdl].state;
}
EXPORT_SYMBOL(gsi_get_chan_state);

int gsi_get_chan_poll_mode(int chan_hdl)
{
	return atomic_read(&gsi_ctx->chan[chan_hdl].poll_mode);
}
EXPORT_SYMBOL(gsi_get_chan_poll_mode);

uint32_t gsi_get_ring_len(int chan_hdl)
{
	return gsi_ctx->chan[chan_hdl].ring.len;
}
EXPORT_SYMBOL(gsi_get_ring_len);

uint8_t gsi_get_chan_props_db_in_bytes(int chan_hdl)
{
	return gsi_ctx->chan[chan_hdl].props.db_in_bytes;
}
EXPORT_SYMBOL(gsi_get_chan_props_db_in_bytes);

int gsi_get_peripheral_ee(void)
{
	return gsi_ctx->per.ee;
}
EXPORT_SYMBOL(gsi_get_peripheral_ee);
uint32_t gsi_get_chan_stop_stm(int chan_id, int ee)
{
	uint32_t ch_scratch;

	ch_scratch = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_4,
		ee, chan_id);
	/* STM occupies bits 28-31 only; shift by 28 to return it as 0..15 */
	return (ch_scratch & 0xF0000000) >> 28;
}
EXPORT_SYMBOL(gsi_get_chan_stop_stm);
enum gsi_evt_ring_elem_size gsi_get_evt_ring_re_size(int evt_hdl)
{
	return gsi_ctx->evtr[evt_hdl].props.re_size;
}
EXPORT_SYMBOL(gsi_get_evt_ring_re_size);

uint32_t gsi_get_evt_ring_len(int evt_hdl)
{
	return gsi_ctx->evtr[evt_hdl].ring.len;
}
EXPORT_SYMBOL(gsi_get_evt_ring_len);

void gsi_update_almst_empty_thrshold(unsigned long chan_hdl,
	unsigned short threshold)
{
	gsihal_write_reg_nk(GSI_EE_n_CH_k_CH_ALMST_EMPTY_THRSHOLD,
		gsi_ctx->per.ee, chan_hdl, threshold);
}
EXPORT_SYMBOL(gsi_update_almst_empty_thrshold);
static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr)
{
	union __packed gsi_channel_scratch scr;

	/*
	 * The sequence below is not atomic. The assumption is that the
	 * sequencer-specific fields remain unchanged across it.
	 */

	/* READ */
	scr.data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word2 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word3 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word4 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);

	/* UPDATE */
	scr.mhi.polling_mode = mscr.polling_mode;
	if (gsi_ctx->per.ver < GSI_VER_2_5) {
		scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre;
		scr.mhi.outstanding_threshold = mscr.outstanding_threshold;
	}

	/* WRITE */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl, scr.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl, scr.data.word2);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, scr.data.word3);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, scr.data.word4);

	return scr;
}
/**
 * gsi_get_hw_profiling_stats() - Query GSI HW profiling stats
 * @stats:	[out] stats blob, supplied by the client and populated by
 *		the driver
 *
 * Returns: 0 on success, negative on failure
 */
int gsi_get_hw_profiling_stats(struct gsi_hw_profiling_data *stats)
{
	if (stats == NULL) {
		GSIERR("bad params: stats == NULL\n");
		return -EINVAL;
	}

	stats->bp_cnt = (u64)gsihal_read_reg(
		GSI_GSI_MCS_PROFILING_BP_CNT_LSB) +
		((u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_BP_CNT_MSB) << 32);
	stats->bp_and_pending_cnt = (u64)gsihal_read_reg(
		GSI_GSI_MCS_PROFILING_BP_AND_PENDING_CNT_LSB) +
		((u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_BP_AND_PENDING_CNT_MSB) << 32);
	stats->mcs_busy_cnt = (u64)gsihal_read_reg(
		GSI_GSI_MCS_PROFILING_MCS_BUSY_CNT_LSB) +
		((u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_MCS_BUSY_CNT_MSB) << 32);
	stats->mcs_idle_cnt = (u64)gsihal_read_reg(
		GSI_GSI_MCS_PROFILING_MCS_IDLE_CNT_LSB) +
		((u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_MCS_IDLE_CNT_MSB) << 32);

	return 0;
}
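/*
 * Illustrative sketch (not built): deriving an MCS utilization percentage
 * from two gsi_get_hw_profiling_stats() samples. The 100 ms window, the
 * helper name and the use of div64_u64()/msleep() are assumptions; the
 * counter fields are the real ones populated above.
 */
#if 0
static u32 gsi_mcs_util_pct(void)
{
	struct gsi_hw_profiling_data a, b;
	u64 busy, idle;

	if (gsi_get_hw_profiling_stats(&a))
		return 0;
	msleep(100);	/* sampling window */
	if (gsi_get_hw_profiling_stats(&b))
		return 0;

	busy = b.mcs_busy_cnt - a.mcs_busy_cnt;
	idle = b.mcs_idle_cnt - a.mcs_idle_cnt;
	if (busy + idle == 0)
		return 0;

	return (u32)div64_u64(busy * 100, busy + idle);
}
#endif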
/**
 * gsi_get_fw_version() - Query GSI FW version
 * @ver:	[out] version blob, supplied by the client and populated by
 *		the driver
 *
 * Returns: 0 on success, negative on failure
 */
int gsi_get_fw_version(struct gsi_fw_version *ver)
{
	u32 raw = 0;

	if (ver == NULL) {
		GSIERR("bad params: ver == NULL\n");
		return -EINVAL;
	}

	if (gsi_ctx->per.ver < GSI_VER_3_0)
		raw = gsihal_read_reg_n(GSI_GSI_INST_RAM_n,
			GSI_INST_RAM_FW_VER_OFFSET);
	else
		raw = gsihal_read_reg_n(GSI_GSI_INST_RAM_n,
			GSI_INST_RAM_FW_VER_GSI_3_0_OFFSET);

	ver->hw = (raw & GSI_INST_RAM_FW_VER_HW_MASK) >>
		GSI_INST_RAM_FW_VER_HW_SHIFT;
	ver->flavor = (raw & GSI_INST_RAM_FW_VER_FLAVOR_MASK) >>
		GSI_INST_RAM_FW_VER_FLAVOR_SHIFT;
	ver->fw = (raw & GSI_INST_RAM_FW_VER_FW_MASK) >>
		GSI_INST_RAM_FW_VER_FW_SHIFT;

	return 0;
}
#if IS_ENABLED(CONFIG_QCOM_VA_MINIDUMP)
static int qcom_va_md_gsi_notif_handler(struct notifier_block *this,
	unsigned long event, void *ptr)
{
	struct va_md_entry entry;

	strlcpy(entry.owner, "gsi_mini", sizeof(entry.owner));
	entry.vaddr = (unsigned long)gsi_ctx;
	entry.size = sizeof(struct gsi_ctx);
	qcom_va_md_add_region(&entry);

	return NOTIFY_OK;
}

static struct notifier_block qcom_va_md_gsi_notif_blk = {
	.notifier_call = qcom_va_md_gsi_notif_handler,
	.priority = INT_MAX,
};
#endif
static int msm_gsi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int result;

	pr_debug("gsi_probe\n");
	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
	if (!gsi_ctx) {
		dev_err(dev, "failed to allocate gsi context\n");
		return -ENOMEM;
	}

	gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
		"gsi", MINIDUMP_MASK);
	if (gsi_ctx->ipc_logbuf == NULL)
		GSIERR("failed to create IPC log, continue...\n");

	result = of_property_read_u32(pdev->dev.of_node, "qcom,num-msi",
		&gsi_ctx->msi.num);
	if (result) {
		GSIERR("No MSIs configured\n");
	} else if (gsi_ctx->msi.num > GSI_MAX_NUM_MSI) {
		GSIERR("Num MSIs %u larger than max %u, normalizing\n",
			gsi_ctx->msi.num, GSI_MAX_NUM_MSI);
		gsi_ctx->msi.num = GSI_MAX_NUM_MSI;
	} else {
		GSIDBG("Num MSIs=%u\n", gsi_ctx->msi.num);
	}

	gsi_ctx->dev = dev;
	init_completion(&gsi_ctx->gen_ee_cmd_compl);
	gsi_debugfs_init();

#if IS_ENABLED(CONFIG_QCOM_VA_MINIDUMP)
	result = qcom_va_md_register("gsi_mini", &qcom_va_md_gsi_notif_blk);
	if (result)
		GSIERR("gsi mini qcom_va_md_register failed = %d\n", result);
	else
		GSIDBG("gsi mini qcom_va_md_register success\n");
#endif

	return 0;
}
static struct platform_driver msm_gsi_driver = {
	.probe = msm_gsi_probe,
	.driver = {
		.name = "gsi",
		.of_match_table = msm_gsi_match,
	},
};

static struct platform_device *pdev;

/*
 * Module init.
 */
static int __init gsi_init(void)
{
	int ret;

	pr_debug("%s\n", __func__);
	ret = platform_driver_register(&msm_gsi_driver);
	if (ret < 0)
		goto out;

	if (running_emulation) {
		pdev = platform_device_register_simple("gsi", -1, NULL, 0);
		if (IS_ERR(pdev)) {
			ret = PTR_ERR(pdev);
			platform_driver_unregister(&msm_gsi_driver);
			goto out;
		}
	}
out:
	return ret;
}
arch_initcall(gsi_init);

/*
 * Module exit.
 */
static void __exit gsi_exit(void)
{
	if (running_emulation && pdev)
		platform_device_unregister(pdev);
	platform_driver_unregister(&msm_gsi_driver);
}
module_exit(gsi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Software Interface (GSI)");