// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msm_gsi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include "gsi.h"
#include "gsi_emulation.h"
#include "gsihal.h"
#include <asm/arch_timer.h>
#include <linux/sched/clock.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/version.h>

#define GSI_CMD_TIMEOUT (5*HZ)
#define GSI_START_CMD_TIMEOUT_MS 1000
#define GSI_CMD_POLL_CNT 5
#define GSI_STOP_CMD_TIMEOUT_MS 200
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_IRQ_STORM_THR 5
#define GSI_STOP_CMD_POLL_CNT 4
#define GSI_STOP_IN_PROC_CMD_POLL_CNT 2
#define GSI_RESET_WA_MIN_SLEEP 1000
#define GSI_RESET_WA_MAX_SLEEP 2000
#define GSI_CHNL_STATE_MAX_RETRYCNT 10
#define GSI_STTS_REG_BITS 32
#define GSI_MSB_MASK 0xFFFFFFFF00000000ULL
#define GSI_LSB_MASK 0x00000000FFFFFFFFULL
#define GSI_MSB(num) ((u32)((num & GSI_MSB_MASK) >> 32))
#define GSI_LSB(num) ((u32)(num & GSI_LSB_MASK))
#define GSI_INST_RAM_FW_VER_OFFSET (0)
#define GSI_INST_RAM_FW_VER_GSI_3_0_OFFSET (64)
#define GSI_INST_RAM_FW_VER_HW_MASK (0xFC00)
#define GSI_INST_RAM_FW_VER_HW_SHIFT (10)
#define GSI_INST_RAM_FW_VER_FLAVOR_MASK (0x380)
#define GSI_INST_RAM_FW_VER_FLAVOR_SHIFT (7)
#define GSI_INST_RAM_FW_VER_FW_MASK (0x7f)
#define GSI_INST_RAM_FW_VER_FW_SHIFT (0)

#ifndef CONFIG_DEBUG_FS
void gsi_debugfs_init(void)
{
}
#endif

static const struct of_device_id msm_gsi_match[] = {
	{ .compatible = "qcom,msm_gsi", },
	{ },
};

#if defined(CONFIG_IPA_EMULATION)
static bool running_emulation = true;
#else
static bool running_emulation;
#endif

struct gsi_ctx *gsi_ctx;

static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr);
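
/*
 * The __gsi_config_*_irq() helpers below share one pattern: read the
 * current interrupt mask/enable register, then write it back with only
 * the bits selected by @mask replaced by the corresponding bits of
 * @val, i.e. a read-modify-write that leaves all other bits untouched.
 * The *_all_* variants apply the same update to every register in the
 * bit-map array used on GSI 3.0 and later.
 */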
static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ_MSK, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ_MSK, ee,
		(curr & ~mask) | (val & mask));
}

static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK, ee,
		(curr & ~mask) | (val & mask));
}

static void __gsi_config_all_ch_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr, k, max_k;

	max_k = gsihal_get_bit_map_array_size();
	for (k = 0; k < max_k; k++) {
		curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_k, ee, k);
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_k, ee, k,
			(curr & ~mask) | (val & mask));
	}
}

static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK, ee,
		(curr & ~mask) | (val & mask));
}

static void __gsi_config_all_evt_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr, k, max_k;

	max_k = gsihal_get_bit_map_array_size();
	for (k = 0; k < max_k; k++) {
		curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_k, ee, k);
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_k, ee, k,
			(curr & ~mask) | (val & mask));
	}
}

static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee,
		(curr & ~mask) | (val & mask));
	GSIDBG("current IEOB_IRQ_MSK: 0x%x, change to: 0x%x\n",
		curr, ((curr & ~mask) | (val & mask)));
}

static void __gsi_config_all_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr, k, max_k;

	max_k = gsihal_get_bit_map_array_size();
	for (k = 0; k < max_k; k++) {
		curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k,
			(curr & ~mask) | (val & mask));
		GSIDBG("current IEOB_IRQ_MSK: 0x%x, change to: 0x%x\n",
			curr, ((curr & ~mask) | (val & mask)));
	}
}

static void __gsi_config_ieob_irq_k(int ee, uint32_t k, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
	gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k,
		(curr & ~mask) | (val & mask));
	GSIDBG("current IEOB_IRQ_MSK: 0x%x, change to: 0x%x\n",
		curr, ((curr & ~mask) | (val & mask)));
}

static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_EN, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_EN, ee,
		(curr & ~mask) | (val & mask));
}

static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_EN, ee);
	gsihal_write_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_EN, ee,
		(curr & ~mask) | (val & mask));
}
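
/*
 * Wait for a channel command (start/stop) to complete. The completion
 * normally arrives via interrupt; if it does not, the channel state
 * registers are polled directly for up to tm * GSI_CMD_POLL_CNT so that
 * a missed interrupt does not leave the cached channel state stale.
 */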
static void gsi_channel_state_change_wait(unsigned long chan_hdl,
	struct gsi_chan_ctx *ctx,
	uint32_t tm, enum gsi_ch_cmd_opcode op)
{
	int poll_cnt;
	int gsi_pending_intr;
	int res;
	struct gsihal_reg_ctx_type_irq type;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	int ee = gsi_ctx->per.ee;
	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
	int stop_in_proc_retry = 0;
	int stop_retry = 0;

	/*
	 * Poll the GSI channel for a total duration of
	 * tm * GSI_CMD_POLL_CNT. Polling the state registers directly
	 * improves debuggability of the GSI HW state.
	 */
	for (poll_cnt = 0;
		poll_cnt < GSI_CMD_POLL_CNT;
		poll_cnt++) {
		res = wait_for_completion_timeout(&ctx->compl,
			msecs_to_jiffies(tm));

		/* Interrupt received, return */
		if (res != 0)
			return;

		gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_TYPE_IRQ, ee, &type);
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			gsi_pending_intr = gsihal_read_reg_nk(
				GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_k,
				ee, gsihal_get_ch_reg_idx(chan_hdl));
		} else {
			gsi_pending_intr = gsihal_read_reg_n(
				GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ, ee);
		}

		if (gsi_ctx->per.ver == GSI_VER_1_0) {
			gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
				ee, chan_hdl, &ch_k_cntxt_0);
			curr_state = ch_k_cntxt_0.chstate;
		}

		/*
		 * Update the channel state only if the interrupt was
		 * raised for this particular channel and the channel
		 * control interrupt type is set.
		 */
		if ((type.ch_ctrl) &&
			(gsi_pending_intr & gsihal_get_ch_reg_mask(chan_hdl))) {
			/*
			 * Check the channel state here in case the channel
			 * is already started but the interrupt has not yet
			 * been received.
			 */
			gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
				ee, chan_hdl, &ch_k_cntxt_0);
			curr_state = ch_k_cntxt_0.chstate;
		}

		if (op == GSI_CH_START) {
			if (curr_state == GSI_CHAN_STATE_STARTED ||
				curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
				ctx->state = curr_state;
				return;
			}
		}

		if (op == GSI_CH_STOP) {
			if (curr_state == GSI_CHAN_STATE_STOPPED)
				stop_retry++;
			else if (curr_state == GSI_CHAN_STATE_STOP_IN_PROC)
				stop_in_proc_retry++;
		}

		/*
		 * If a stop was first observed on this iteration, restart
		 * the poll count so the loop keeps running until the
		 * stop / stop-in-proc retry limits are reached.
		 */
		if (stop_retry == 1 || stop_in_proc_retry == 1)
			poll_cnt = 0;

		/*
		 * If the stop retry limit is reached and the channel is
		 * already stopped, clear the pending interrupt.
		 */
		if (stop_retry == GSI_STOP_CMD_POLL_CNT) {
			if (gsi_ctx->per.ver >= GSI_VER_3_0) {
				gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_k,
					ee, gsihal_get_ch_reg_idx(chan_hdl),
					gsi_pending_intr);
			} else {
				gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR,
					ee,
					gsi_pending_intr);
			}
			ctx->state = curr_state;
			return;
		}

		/*
		 * If the channel remains in STOP_IN_PROC, there is no
		 * need to keep waiting for the full timeout.
		 */
		if (stop_in_proc_retry == GSI_STOP_IN_PROC_CMD_POLL_CNT) {
			ctx->state = curr_state;
			return;
		}

		GSIDBG("GSI wait on chan_hdl=%lu irqtyp=%u state=%u intr=%u\n",
			chan_hdl,
			type.ch_ctrl,
			ctx->state,
			gsi_pending_intr);
	}

	GSIDBG("invalidating the channel state when timeout happens\n");
	ctx->state = curr_state;
}
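
/*
 * Channel-control interrupt handler: for every channel whose bit is set
 * in the CH_IRQ status register, acknowledge the interrupt, re-read the
 * channel context, update the cached software state, and complete any
 * command waiting on it.
 */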
static void gsi_handle_ch_ctrl(int ee)
{
	uint32_t ch;
	int i, k, max_k;
	uint32_t ch_hdl;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	struct gsi_chan_ctx *ctx;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_k, ee, k);
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_k, ee, k, ch);
			GSIDBG("ch %x\n", ch);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch) {
					ch_hdl = i + (GSI_STTS_REG_BITS * k);
					if (ch_hdl >= gsi_ctx->max_ch ||
					    ch_hdl >= GSI_CHAN_MAX) {
						GSIERR("invalid channel %d\n",
							ch_hdl);
						break;
					}
					ctx = &gsi_ctx->chan[ch_hdl];
					gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
						ee, ch_hdl, &ch_k_cntxt_0);
					ctx->state = ch_k_cntxt_0.chstate;
					GSIDBG("ch %u state updated to %u\n",
						ch_hdl, ctx->state);
					complete(&ctx->compl);
					gsi_ctx->ch_dbg[ch_hdl].cmd_completed++;
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ, ee);
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR, ee, ch);
		GSIDBG("ch %x\n", ch);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch) {
				if (i >= gsi_ctx->max_ch ||
				    i >= GSI_CHAN_MAX) {
					GSIERR("invalid channel %d\n", i);
					break;
				}
				ctx = &gsi_ctx->chan[i];
				gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
					ee, i, &ch_k_cntxt_0);
				ctx->state = ch_k_cntxt_0.chstate;
				GSIDBG("ch %u state updated to %u\n", i,
					ctx->state);
				complete(&ctx->compl);
				gsi_ctx->ch_dbg[i].cmd_completed++;
			}
		}
	}
}
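
/*
 * Event-ring-control counterpart of gsi_handle_ch_ctrl(): refresh the
 * cached event ring state and complete pending event ring commands.
 */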
static void gsi_handle_ev_ctrl(int ee)
{
	uint32_t ch;
	int i, k;
	uint32_t evt_hdl, max_k;
	struct gsi_evt_ctx *ctx;
	struct gsihal_reg_ev_ch_k_cntxt_0 ev_ch_k_cntxt_0;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_k, ee, k);
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_k, ee, k, ch);
			GSIDBG("ev %x\n", ch);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch) {
					evt_hdl = i + (GSI_STTS_REG_BITS * k);
					if (evt_hdl >= gsi_ctx->max_ev ||
					    evt_hdl >= GSI_EVT_RING_MAX) {
						GSIERR("invalid event %d\n",
							evt_hdl);
						break;
					}
					ctx = &gsi_ctx->evtr[evt_hdl];
					gsihal_read_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
						ee, evt_hdl, &ev_ch_k_cntxt_0);
					ctx->state = ev_ch_k_cntxt_0.chstate;
					GSIDBG("evt %u state updated to %u\n",
						evt_hdl, ctx->state);
					complete(&ctx->compl);
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ, ee);
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR, ee, ch);
		GSIDBG("ev %x\n", ch);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch) {
				if (i >= gsi_ctx->max_ev ||
				    i >= GSI_EVT_RING_MAX) {
					GSIERR("invalid event %d\n", i);
					break;
				}
				ctx = &gsi_ctx->evtr[i];
				gsihal_read_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
					ee, i, &ev_ch_k_cntxt_0);
				ctx->state = ev_ch_k_cntxt_0.chstate;
				GSIDBG("evt %u state updated to %u\n", i,
					ctx->state);
				complete(&ctx->compl);
			}
		}
	}
}
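
/*
 * Decode the ERROR_LOG register (interpreted via struct gsi_log_err)
 * and route the error to the peripheral, channel, or event ring error
 * callback as appropriate. Unexpected codes assert, so the hardware
 * state is captured at the moment the error is seen.
 */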
static void gsi_handle_glob_err(uint32_t err)
{
	struct gsi_log_err *log;
	struct gsi_chan_ctx *ch;
	struct gsi_evt_ctx *ev;
	struct gsi_chan_err_notify chan_notify;
	struct gsi_evt_err_notify evt_notify;
	struct gsi_per_notify per_notify;
	enum gsi_err_type err_type;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;

	log = (struct gsi_log_err *)&err;
	GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
		log->virt_idx);
	GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
		log->arg2, log->arg3);

	err_type = log->err_type;
	/*
	 * These are errors reported by the hardware. BUG_ON() is used to
	 * capture the hardware state at the moment an unexpected error
	 * is seen.
	 */
	switch (err_type) {
	case GSI_ERR_TYPE_GLOB:
		per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
		per_notify.user_data = gsi_ctx->per.user_data;
		per_notify.data.err_desc = err & 0xFFFF;
		gsi_ctx->per.notify_cb(&per_notify);
		break;
	case GSI_ERR_TYPE_CHAN:
		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) {
			GSIERR("Unexpected ch %d\n", log->virt_idx);
			return;
		}
		ch = &gsi_ctx->chan[log->virt_idx];
		chan_notify.chan_user_data = ch->props.chan_user_data;
		chan_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_INVALID_TRE_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
				gsi_ctx->per.ee, log->virt_idx, &ch_k_cntxt_0);
			ch->state = ch_k_cntxt_0.chstate;
			GSIDBG("ch %u state updated to %u\n", log->virt_idx,
				ch->state);
			ch->stats.invalid_tre_error++;
			if (ch->state == GSI_CHAN_STATE_ERROR) {
				GSIERR("Unexpected channel state %d\n",
					ch->state);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
		} else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
			complete(&ch->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			chan_notify.evt_id =
				GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id =
				GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
		} else if (log->code == GSI_HWO_1_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
		} else {
			GSIERR("unexpected event log code %d\n", log->code);
			GSI_ASSERT();
		}
		ch->props.err_cb(&chan_notify);
		break;
	case GSI_ERR_TYPE_EVT:
		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) {
			GSIERR("Unexpected ev %d\n", log->virt_idx);
			return;
		}
		ev = &gsi_ctx->evtr[log->virt_idx];
		evt_notify.user_data = ev->props.user_data;
		evt_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
			complete(&ev->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
		} else {
			GSIERR("unexpected event log code %d\n", log->code);
			GSI_ASSERT();
		}
		ev->props.err_cb(&evt_notify);
		break;
	}
}

static void gsi_handle_gp_int1(void)
{
	complete(&gsi_ctx->gen_ee_cmd_compl);
}
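
/*
 * Global EE interrupt handler: read GLOB_IRQ_STTS, dispatch error
 * interrupts to gsi_handle_glob_err(), complete generic EE commands on
 * GP_INT1, forward GP_INT2/GP_INT3 to the client, then clear the
 * status bits that were read.
 */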
static void gsi_handle_glob_ee(int ee)
{
	uint32_t val;
	uint32_t err;
	struct gsi_per_notify notify;
	uint32_t clr = ~0;
	struct gsihal_reg_cntxt_glob_irq_stts cntxt_glob_irq_stts;

	val = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_GLOB_IRQ_STTS,
		ee, &cntxt_glob_irq_stts);
	notify.user_data = gsi_ctx->per.user_data;

	if (cntxt_glob_irq_stts.error_int) {
		err = gsihal_read_reg_n(GSI_EE_n_ERROR_LOG, ee);
		if (gsi_ctx->per.ver >= GSI_VER_1_2)
			gsihal_write_reg_n(GSI_EE_n_ERROR_LOG, ee, 0);
		gsihal_write_reg_n(GSI_EE_n_ERROR_LOG_CLR, ee, clr);
		gsi_handle_glob_err(err);
	}

	if (cntxt_glob_irq_stts.gp_int1)
		gsi_handle_gp_int1();

	if (cntxt_glob_irq_stts.gp_int2) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP2;
		gsi_ctx->per.notify_cb(&notify);
	}

	if (cntxt_glob_irq_stts.gp_int3) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP3;
		gsi_ctx->per.notify_cb(&notify);
	}

	gsihal_write_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_CLR, ee, val);
}
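
/*
 * Ring bookkeeping helpers. Rings are contiguous element arrays;
 * advancing a pointer past the end wraps it back to the base. The
 * distance between two ring addresses therefore has to account for
 * wrap-around, which is what gsi_get_complete_num() does.
 */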
static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
{
	ctx->wp_local += ctx->elem_sz;
	if (ctx->wp_local == ctx->end)
		ctx->wp_local = ctx->base;
}

static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
{
	ctx->rp_local += ctx->elem_sz;
	if (ctx->rp_local == ctx->end)
		ctx->rp_local = ctx->base;
}

uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
{
	WARN_ON(addr < ctx->base || addr >= ctx->end);
	return (uint32_t)(addr - ctx->base) / ctx->elem_sz;
}

static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
	uint64_t addr2)
{
	uint32_t addr_diff;

	GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
		ctx->base, ctx->end);
	if (addr1 < ctx->base || addr1 >= ctx->end) {
		GSIERR("address = 0x%llx not in range\n", addr1);
		GSI_ASSERT();
	}
	if (addr2 < ctx->base || addr2 >= ctx->end) {
		GSIERR("address = 0x%llx not in range\n", addr2);
		GSI_ASSERT();
	}
	addr_diff = (uint32_t)(addr2 - addr1);
	if (addr1 < addr2)
		return addr_diff / ctx->elem_sz;
	else
		return (addr_diff + ctx->len) / ctx->elem_sz;
}
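
/*
 * Translate one transfer-completion event into a client notification:
 * look up the channel from the event, advance the channel read pointer
 * (GPI) or take the index from the event cookie (GCI), attach the
 * saved per-transfer user data, and optionally invoke the client's
 * xfer_cb. When invoked from interrupt context (callback == true), the
 * read pointer is only advanced for non-polling TX channels; RX and
 * TX-polling channels are advanced from the polling path instead.
 */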
static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
	struct gsi_chan_xfer_notify *notify, bool callback)
{
	uint32_t ch_id;
	struct gsi_chan_ctx *ch_ctx;
	uint16_t rp_idx;
	uint64_t rp;

	ch_id = evt->chid;
	if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
		GSIERR("Unexpected ch %d\n", ch_id);
		return;
	}

	ch_ctx = &gsi_ctx->chan[ch_id];
	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
		return;

	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
		rp = evt->xfer_ptr;
		if (ch_ctx->ring.rp_local != rp) {
			ch_ctx->stats.completed +=
				gsi_get_complete_num(&ch_ctx->ring,
					ch_ctx->ring.rp_local, rp);
			ch_ctx->ring.rp_local = rp;
		}

		/*
		 * Increment the local RP only in polling context to
		 * avoid a sys len mismatch.
		 */
		if (!callback || (ch_ctx->props.dir == GSI_CHAN_DIR_TO_GSI &&
		    !ch_ctx->props.tx_poll))
			/* the element at RP is also processed */
			gsi_incr_ring_rp(&ch_ctx->ring);

		ch_ctx->ring.rp = ch_ctx->ring.rp_local;
		rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
		notify->veid = GSI_VEID_DEFAULT;
	} else {
		rp_idx = evt->cookie;
		notify->veid = evt->veid;
	}

	WARN_ON(!ch_ctx->user_data[rp_idx].valid);
	notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;
	/*
	 * During suspend, just before the channel is stopped, an IEOB
	 * interrupt may arrive; its transfer pointer is not processed
	 * because the channel is moving to poll mode. After resume,
	 * once the channel is restarted, the next IEOB interrupt would
	 * overwrite that transfer pointer. To avoid this, process all
	 * data in polling context.
	 */
	if (!callback || (ch_ctx->props.dir == GSI_CHAN_DIR_TO_GSI &&
	    !ch_ctx->props.tx_poll)) {
		ch_ctx->stats.completed++;
		ch_ctx->user_data[rp_idx].valid = false;
	}

	notify->chan_user_data = ch_ctx->props.chan_user_data;
	notify->evt_id = evt->code;
	notify->bytes_xfered = evt->len;

	if (callback) {
		if (atomic_read(&ch_ctx->poll_mode)) {
			GSIERR("Calling client callback in polling mode\n");
			WARN_ON(1);
		}
		ch_ctx->props.xfer_cb(notify);
	}
}
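
/*
 * Consume the event ring element at the local read pointer, hand it to
 * gsi_process_chan(), and then recycle the element by advancing both
 * the event ring read and write pointers. For RX channels and TX
 * polling channels invoked from interrupt context, the pointers are
 * left for the polling path to advance.
 */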
static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
	struct gsi_chan_xfer_notify *notify, bool callback)
{
	struct gsi_xfer_compl_evt *evt;
	struct gsi_chan_ctx *ch_ctx;

	evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
		ctx->ring.rp_local - ctx->ring.base);
	gsi_process_chan(evt, notify, callback);

	/*
	 * Increment the local RP only in polling context to avoid a
	 * sys len mismatch.
	 */
	ch_ctx = &gsi_ctx->chan[evt->chid];
	if (callback && (ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI ||
	    ch_ctx->props.tx_poll))
		return;
	gsi_incr_ring_rp(&ctx->ring);

	/* recycle this element */
	gsi_incr_ring_wp(&ctx->ring);
	ctx->stats.completed++;
}
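
/*
 * Doorbell helpers: publish the local write pointer to the hardware by
 * writing its lower 32 bits to the event ring / channel DOORBELL_0
 * register for this EE. gsi_ring_chan_doorbell() additionally rings
 * the event ring doorbell first for FROM_GSI channels, so events are
 * allocated before the new TREs are submitted.
 */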
static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
	uint32_t val;

	ctx->ring.wp = ctx->ring.wp_local;
	val = GSI_LSB(ctx->ring.wp_local);
	gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_DOORBELL_0,
		gsi_ctx->per.ee, ctx->id, val);
}

void gsi_ring_evt_doorbell_polling_mode(unsigned long chan_hdl)
{
	struct gsi_evt_ctx *ctx;

	ctx = gsi_ctx->chan[chan_hdl].evtr;
	gsi_ring_evt_doorbell(ctx);
}
EXPORT_SYMBOL(gsi_ring_evt_doorbell_polling_mode);

static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
{
	uint32_t val;

	/*
	 * Allocate new events for this channel first before submitting
	 * the new TREs. For TO_GSI channels the event ring doorbell is
	 * rung as part of interrupt handling.
	 */
	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		gsi_ring_evt_doorbell(ctx->evtr);
	ctx->ring.wp = ctx->ring.wp_local;
	val = GSI_LSB(ctx->ring.wp_local);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_0,
		gsi_ctx->per.ee, ctx->props.ch_id, val);
}

static bool check_channel_polling(struct gsi_evt_ctx *ctx)
{
	/* For shared event rings both channels will be marked */
	return atomic_read(&ctx->chan[0]->poll_mode);
}
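
/*
 * IEOB (interrupt-at-end-of-block) handler: for each unmasked event
 * ring with a pending IEOB bit, drain completion events from the ring
 * under the ring lock and deliver callbacks, then ring the event
 * doorbell to return the consumed elements. If a channel flips to
 * polling mode mid-drain, the loop stops so the polling path takes
 * over. MSI-driven event rings are skipped here.
 */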
static void gsi_handle_ieob(int ee)
{
	uint32_t ch, evt_hdl;
	int i, k, max_k;
	uint64_t rp;
	struct gsi_evt_ctx *ctx;
	struct gsi_chan_xfer_notify notify;
	unsigned long flags;
	unsigned long cntr;
	uint32_t msk;
	bool empty;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_k, ee, k);
			msk = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee, k, ch & msk);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch & msk) {
					evt_hdl = i + (GSI_STTS_REG_BITS * k);
					if (evt_hdl >= gsi_ctx->max_ev ||
					    evt_hdl >= GSI_EVT_RING_MAX) {
						GSIERR("invalid event %d\n",
							evt_hdl);
						break;
					}
					ctx = &gsi_ctx->evtr[evt_hdl];
					/*
					 * Don't handle MSI interrupts, only
					 * handle IEOB IRQs
					 */
					if (ctx->props.intr == GSI_INTR_MSI)
						continue;
					if (ctx->props.intf !=
					    GSI_EVT_CHTYPE_GPI_EV) {
						GSIERR("Unexpected irq intf %d\n",
							ctx->props.intf);
						GSI_ASSERT();
					}
					spin_lock_irqsave(&ctx->ring.slock,
						flags);
check_again_v3_0:
					cntr = 0;
					empty = true;
					rp = ctx->props.gsi_read_event_ring_rp(
						&ctx->props, ctx->id, ee);
					rp |= ctx->ring.rp & GSI_MSB_MASK;
					ctx->ring.rp = rp;
					while (ctx->ring.rp_local != rp) {
						++cntr;
						if (check_channel_polling(ctx)) {
							cntr = 0;
							break;
						}
						gsi_process_evt_re(ctx, &notify,
							true);
						empty = false;
					}
					if (!empty)
						gsi_ring_evt_doorbell(ctx);
					if (cntr != 0)
						goto check_again_v3_0;
					spin_unlock_irqrestore(&ctx->ring.slock,
						flags);
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ, ee);
		msk = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee);
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR, ee, ch & msk);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch & msk) {
				if (i >= gsi_ctx->max_ev ||
				    i >= GSI_EVT_RING_MAX) {
					GSIERR("invalid event %d\n", i);
					break;
				}
				ctx = &gsi_ctx->evtr[i];
				/*
				 * Don't handle MSI interrupts, only handle
				 * IEOB IRQs
				 */
				if (ctx->props.intr == GSI_INTR_MSI)
					continue;
				if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
					GSIERR("Unexpected irq intf %d\n",
						ctx->props.intf);
					GSI_ASSERT();
				}
				spin_lock_irqsave(&ctx->ring.slock, flags);
check_again:
				cntr = 0;
				empty = true;
				rp = ctx->props.gsi_read_event_ring_rp(
					&ctx->props, ctx->id, ee);
				rp |= ctx->ring.rp & GSI_MSB_MASK;
				ctx->ring.rp = rp;
				while (ctx->ring.rp_local != rp) {
					++cntr;
					if (check_channel_polling(ctx)) {
						cntr = 0;
						break;
					}
					gsi_process_evt_re(ctx, &notify, true);
					empty = false;
				}
				if (!empty)
					gsi_ring_evt_doorbell(ctx);
				if (cntr != 0)
					goto check_again;
				spin_unlock_irqrestore(&ctx->ring.slock, flags);
			}
		}
	}
}
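
/*
 * Inter-EE channel/event interrupts are acknowledged but not acted on:
 * no other execution environment is expected to reconfigure this EE's
 * channels or event rings, so any such interrupt is only logged.
 */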
static void gsi_handle_inter_ee_ch_ctrl(int ee)
{
	uint32_t ch, ch_hdl;
	int i, k, max_k;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_INTER_EE_n_SRC_GSI_CH_IRQ_k, ee, k);
			gsihal_write_reg_nk(GSI_INTER_EE_n_SRC_GSI_CH_IRQ_k, ee, k, ch);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch) {
					ch_hdl = i + (GSI_STTS_REG_BITS * k);
					/* not currently expected */
					GSIERR("ch %u was inter-EE changed\n", ch_hdl);
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_INTER_EE_n_SRC_GSI_CH_IRQ, ee);
		gsihal_write_reg_n(GSI_INTER_EE_n_SRC_GSI_CH_IRQ, ee, ch);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch) {
				/* not currently expected */
				GSIERR("ch %u was inter-EE changed\n", i);
			}
		}
	}
}

static void gsi_handle_inter_ee_ev_ctrl(int ee)
{
	uint32_t ch, evt_hdl;
	int i, k, max_k;

	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		max_k = gsihal_get_bit_map_array_size();
		for (k = 0; k < max_k; k++) {
			ch = gsihal_read_reg_nk(GSI_INTER_EE_n_SRC_EV_CH_IRQ_k, ee, k);
			gsihal_write_reg_nk(GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_k, ee, k, ch);
			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
				if ((1 << i) & ch) {
					evt_hdl = i + (GSI_STTS_REG_BITS * k);
					/* not currently expected */
					GSIERR("evt %u was inter-EE changed\n",
						evt_hdl);
				}
			}
		}
	} else {
		ch = gsihal_read_reg_n(GSI_INTER_EE_n_SRC_EV_CH_IRQ, ee);
		gsihal_write_reg_n(GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR, ee, ch);
		for (i = 0; i < GSI_STTS_REG_BITS; i++) {
			if ((1 << i) & ch) {
				/* not currently expected */
				GSIERR("evt %u was inter-EE changed\n", i);
			}
		}
	}
}

static void gsi_handle_general(int ee)
{
	uint32_t val;
	struct gsi_per_notify notify;
	struct gsihal_reg_cntxt_gsi_irq_stts gsi_irq_stts;

	val = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_GSI_IRQ_STTS,
		ee, &gsi_irq_stts);
	notify.user_data = gsi_ctx->per.user_data;

	if (gsi_irq_stts.gsi_mcs_stack_ovrflow)
		notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;
	if (gsi_irq_stts.gsi_cmd_fifo_ovrflow)
		notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;
	if (gsi_irq_stts.gsi_bus_error)
		notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;
	if (gsi_irq_stts.gsi_break_point)
		notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;

	if (gsi_ctx->per.notify_cb)
		gsi_ctx->per.notify_cb(&notify);

	gsihal_write_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_CLR, ee, val);
}
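
/*
 * Top-level IRQ dispatch: loop on CNTXT_TYPE_IRQ while the GSI clock is
 * on and any interrupt type is pending, recording each iteration in the
 * ISR cache ring buffer for debugging. Channel and event-ring control
 * interrupts end the dispatch loop after being handled. gsi_isr() below
 * gates this on clock availability and detects interrupt storms that
 * arrive while the clock is off.
 */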
static void gsi_handle_irq(void)
{
	uint32_t type;
	int ee = gsi_ctx->per.ee;
	int index;
	struct gsihal_reg_ctx_type_irq ctx_type_irq;

	while (1) {
		if (!gsi_ctx->per.clk_status_cb())
			break;
		type = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_TYPE_IRQ,
			ee, &ctx_type_irq);
		if (!type)
			break;
		GSIDBG_LOW("type 0x%x\n", type);

		index = gsi_ctx->gsi_isr_cache_index;
		gsi_ctx->gsi_isr_cache[index].timestamp =
			sched_clock();
		gsi_ctx->gsi_isr_cache[index].qtimer =
			__arch_counter_get_cntvct();
		gsi_ctx->gsi_isr_cache[index].interrupt_type = type;
		gsi_ctx->gsi_isr_cache_index++;
		if (gsi_ctx->gsi_isr_cache_index == GSI_ISR_CACHE_MAX)
			gsi_ctx->gsi_isr_cache_index = 0;

		if (ctx_type_irq.ch_ctrl) {
			gsi_handle_ch_ctrl(ee);
			break;
		}
		if (ctx_type_irq.ev_ctrl) {
			gsi_handle_ev_ctrl(ee);
			break;
		}
		if (ctx_type_irq.glob_ee)
			gsi_handle_glob_ee(ee);
		if (ctx_type_irq.ieob)
			gsi_handle_ieob(ee);
		if (ctx_type_irq.inter_ee_ch_ctrl)
			gsi_handle_inter_ee_ch_ctrl(ee);
		if (ctx_type_irq.inter_ee_ev_ctrl)
			gsi_handle_inter_ee_ev_ctrl(ee);
		if (ctx_type_irq.general)
			gsi_handle_general(ee);
	}
}

static irqreturn_t gsi_isr(int irq, void *ctxt)
{
	if (gsi_ctx->per.req_clk_cb) {
		bool granted = false;

		gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
		if (granted) {
			gsi_handle_irq();
			gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
		}
	} else if (!gsi_ctx->per.clk_status_cb()) {
		/* we only want to capture the gsi isr storm here */
		if (atomic_read(&gsi_ctx->num_unclock_irq) ==
		    GSI_IRQ_STORM_THR)
			gsi_ctx->per.enable_clk_bug_on();
		atomic_inc(&gsi_ctx->num_unclock_irq);
		return IRQ_HANDLED;
	} else {
		atomic_set(&gsi_ctx->num_unclock_irq, 0);
		gsi_handle_irq();
	}
	return IRQ_HANDLED;
}
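
/*
 * The maximum channel / event ring counts are advertised by the HW in
 * version-specific HW_PARAM registers, so both getters below switch on
 * the GSI version to pick the right register and field.
 */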
static uint32_t gsi_get_max_channels(enum gsi_ver ver)
{
	uint32_t max_ch = 0;
	struct gsihal_reg_hw_param hw_param;
	struct gsihal_reg_hw_param2 hw_param2;

	switch (ver) {
	case GSI_VER_ERR:
	case GSI_VER_MAX:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		break;
	case GSI_VER_1_0:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM,
			gsi_ctx->per.ee, &hw_param);
		max_ch = hw_param.gsi_ch_num;
		break;
	case GSI_VER_1_2:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_0,
			gsi_ctx->per.ee, &hw_param);
		max_ch = hw_param.gsi_ch_num;
		break;
	default:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_2,
			gsi_ctx->per.ee, &hw_param2);
		max_ch = hw_param2.gsi_num_ch_per_ee;
		break;
	}

	GSIDBG("max channels %d\n", max_ch);
	return max_ch;
}

static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
{
	uint32_t max_ev = 0;
	struct gsihal_reg_hw_param hw_param;
	struct gsihal_reg_hw_param2 hw_param2;
	struct gsihal_reg_hw_param4 hw_param4;

	switch (ver) {
	case GSI_VER_ERR:
	case GSI_VER_MAX:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		break;
	case GSI_VER_1_0:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM,
			gsi_ctx->per.ee, &hw_param);
		max_ev = hw_param.gsi_ev_ch_num;
		break;
	case GSI_VER_1_2:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_0,
			gsi_ctx->per.ee, &hw_param);
		max_ev = hw_param.gsi_ev_ch_num;
		break;
	case GSI_VER_3_0:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_4,
			gsi_ctx->per.ee, &hw_param4);
		max_ev = hw_param4.gsi_num_ev_per_ee;
		break;
	default:
		gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_2,
			gsi_ctx->per.ee, &hw_param2);
		max_ev = hw_param2.gsi_num_ev_per_ee;
		break;
	}

	GSIDBG("max event rings %d\n", max_ev);
	return max_ev;
}
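
/*
 * Deferred path of gsi_isr(): called by the client once a clock request
 * issued via req_clk_cb has been granted. Runs the pending IRQ handling
 * under the driver spinlock and then releases the clock again.
 */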
int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	gsi_handle_irq();
	gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_complete_clk_grant);

int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	gsi_ctx->base = devm_ioremap(
		gsi_ctx->dev, gsi_base_addr, gsi_size);
	if (!gsi_ctx->base) {
		GSIERR("failed to map access to GSI HW\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%x)\n",
		&gsi_base_addr,
		gsi_ctx->base,
		gsi_size);

	/* initialize HAL before accessing any register */
	gsihal_init(ver, gsi_ctx->base);

	return 0;
}
EXPORT_SYMBOL(gsi_map_base);

int gsi_unmap_base(void)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
	gsi_ctx->base = NULL;
	return 0;
}
EXPORT_SYMBOL(gsi_unmap_base);

int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
{
	int res;
	struct gsihal_reg_gsi_status gsi_status;
	struct gsihal_reg_gsi_ee_n_cntxt_gsi_irq gen_irq;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!props || !dev_hdl) {
		GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
		GSIERR("bad params gsi_ver=%d\n", props->ver);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (!props->notify_cb) {
		GSIERR("notify callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->req_clk_cb && !props->rel_clk_cb) {
		GSIERR("rel callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (gsi_ctx->per_registered) {
		GSIERR("per already registered\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_init(&gsi_ctx->slock);
	gsi_ctx->per = *props;
	if (props->intr == GSI_INTR_IRQ) {
		if (!props->irq) {
			GSIERR("bad irq specified %u\n", props->irq);
			return -GSI_STATUS_INVALID_PARAMS;
		}
		/*
		 * On a real UE, two separate interrupt vectors are
		 * directed toward the GSI/IPA drivers; they are handled
		 * by gsi_isr() and (ipa_isr() or ipa3_isr())
		 * respectively. In the emulation environment this is
		 * not the case; instead, the interrupt vectors are
		 * routed to the emulation hardware's interrupt
		 * controller, which in turn forwards a single interrupt
		 * to the GSI/IPA driver. When that interrupt arrives,
		 * the driver needs to probe the interrupt controller's
		 * registers to see if one, the other, or both
		 * interrupts have occurred. Hence, the code below
		 * handles both situations: the emulator's and the real
		 * UE's.
		 */
		if (running_emulation) {
			/*
			 * New scheme involving the emulator's
			 * interrupt controller.
			 */
			res = devm_request_threaded_irq(
				gsi_ctx->dev,
				props->irq,
				/* top half handler to follow */
				emulator_hard_irq_isr,
				/* threaded bottom half handler to follow */
				emulator_soft_irq_isr,
				IRQF_SHARED,
				"emulator_intcntrlr",
				gsi_ctx);
		} else {
			/*
			 * Traditional scheme used on the real UE.
			 */
			res = devm_request_irq(gsi_ctx->dev, props->irq,
				gsi_isr,
				props->req_clk_cb ? IRQF_TRIGGER_RISING :
					IRQF_TRIGGER_HIGH,
				"gsi",
				gsi_ctx);
		}
		if (res) {
			GSIERR("failed to register isr for %u\n",
				props->irq);
			return -GSI_STATUS_ERROR;
		}
		GSIDBG("succeeded to register isr for %u\n",
			props->irq);

		res = enable_irq_wake(props->irq);
		if (res)
			GSIERR("failed to enable wake irq %u\n", props->irq);
		else
			GSIERR("GSI irq is wake enabled %u\n", props->irq);
	} else {
		GSIERR("do not support interrupt type %u\n", props->intr);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/*
	 * If base not previously mapped via gsi_map_base(), map it
	 * now...
	 */
	if (!gsi_ctx->base) {
		res = gsi_map_base(props->phys_addr, props->size, props->ver);
		if (res)
			return res;
	}

	if (running_emulation) {
		GSIDBG("GSI SW ver register value 0x%x\n",
			gsihal_read_reg_n(GSI_EE_n_GSI_SW_VERSION, 0));
		gsi_ctx->intcntrlr_mem_size =
			props->emulator_intcntrlr_size;
		gsi_ctx->intcntrlr_base =
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
			devm_ioremap(
#else
			devm_ioremap_nocache(
#endif
				gsi_ctx->dev,
				props->emulator_intcntrlr_addr,
				props->emulator_intcntrlr_size);
		if (!gsi_ctx->intcntrlr_base) {
			GSIERR("failed to remap emulator's interrupt controller HW\n");
			gsi_unmap_base();
			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		GSIDBG(
			"Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
			&(props->emulator_intcntrlr_addr),
			gsi_ctx->intcntrlr_base,
			props->emulator_intcntrlr_size);
		gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
		gsi_ctx->intcntrlr_client_isr =
			props->emulator_intcntrlr_client_isr;
	}

	gsi_ctx->per_registered = true;
	mutex_init(&gsi_ctx->mlock);
	atomic_set(&gsi_ctx->num_chan, 0);
	atomic_set(&gsi_ctx->num_evt_ring, 0);
	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
	if (gsi_ctx->max_ch == 0) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max channels\n");
		return -GSI_STATUS_ERROR;
	}
	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
	if (gsi_ctx->max_ev == 0) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max event rings\n");
		return -GSI_STATUS_ERROR;
	}
	if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
		GSIERR("max event rings are beyond absolute maximum\n");
		return -GSI_STATUS_ERROR;
	}
	if (props->mhi_er_id_limits_valid &&
	    props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("MHI event ring start id %u is beyond max %u\n",
			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
		return -GSI_STATUS_ERROR;
	}

	gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
	/* exclude reserved mhi events */
	if (props->mhi_er_id_limits_valid)
		gsi_ctx->evt_bmap |=
			((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^
			((1 << (props->mhi_er_id_limits[0])) - 1);
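	/*
	 * Worked example (hypothetical numbers, for illustration only):
	 * with max_ev = 20, evt_bmap starts as ~(BIT(20) - 1), i.e. IDs
	 * 20..63 are marked unavailable. With mhi_er_id_limits = {10, 13},
	 * the XOR term is (BIT(14) - 1) ^ (BIT(10) - 1), which sets bits
	 * 10..13, reserving those event IDs for MHI so the generic
	 * allocator in gsi_alloc_evt_ring() will skip them.
	 */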
	/*
	 * Enable all interrupts except GSI_BREAK_POINT.
	 * Inter-EE commands / interrupts are not supported.
	 */
	__gsi_config_type_irq(props->ee, ~0, ~0);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		__gsi_config_all_ch_irq(props->ee, ~0, ~0);
		__gsi_config_all_evt_irq(props->ee, ~0, ~0);
		__gsi_config_all_ieob_irq(props->ee, ~0, ~0);
	} else {
		__gsi_config_ch_irq(props->ee, ~0, ~0);
		__gsi_config_evt_irq(props->ee, ~0, ~0);
		__gsi_config_ieob_irq(props->ee, ~0, ~0);
	}
	__gsi_config_glob_irq(props->ee, ~0, ~0);
	/*
	 * Disable the global INT1 interrupt by default; it is enabled
	 * only when sending the generic command.
	 */
	__gsi_config_glob_irq(props->ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);

	gen_irq.gsi_mcs_stack_ovrflow = 1;
	gen_irq.gsi_cmd_fifo_ovrflow = 1;
	gen_irq.gsi_bus_error = 1;
	gen_irq.gsi_break_point = 0;
	gsihal_write_reg_n_fields(GSI_EE_n_CNTXT_GSI_IRQ_EN,
		gsi_ctx->per.ee, &gen_irq);

	gsihal_write_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee, props->intr);
	/* set GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB/MSB to 0 */
	if ((gsi_ctx->per.ver >= GSI_VER_2_0) &&
	    (props->intr != GSI_INTR_MSI)) {
		gsihal_write_reg_n(
			GSI_EE_n_CNTXT_MSI_BASE_LSB, gsi_ctx->per.ee, 0);
		gsihal_write_reg_n(
			GSI_EE_n_CNTXT_MSI_BASE_MSB, gsi_ctx->per.ee, 0);
	}

	gsihal_read_reg_n_fields(GSI_EE_n_GSI_STATUS,
		gsi_ctx->per.ee, &gsi_status);
	if (gsi_status.enabled)
		gsi_ctx->enabled = true;
	else
		GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");

	if (gsi_ctx->per.ver >= GSI_VER_1_2)
		gsihal_write_reg_n(GSI_EE_n_ERROR_LOG, gsi_ctx->per.ee, 0);
	/* Reset the scratch_1 register to zero */
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_1, gsi_ctx->per.ee, 0);

	if (running_emulation) {
		/*
		 * Set up the emulator's interrupt controller...
		 */
		res = setup_emulator_cntrlr(
			gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
		if (res != 0) {
			gsi_unmap_base();
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
			GSIERR("setup_emulator_cntrlr() failed\n");
			return res;
		}
	}

	*dev_hdl = (uintptr_t)gsi_ctx;
	gsi_ctx->gsi_isr_cache_index = 0;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_register_device);
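
/*
 * Illustrative sketch (not part of the driver): the minimal fields a
 * client might fill in before calling gsi_register_device(). All names
 * prefixed "my_" are hypothetical placeholders.
 *
 *	struct gsi_per_props props;
 *	unsigned long hdl;
 *
 *	memset(&props, 0, sizeof(props));
 *	props.ver = GSI_VER_2_5;
 *	props.ee = 0;
 *	props.intr = GSI_INTR_IRQ;
 *	props.irq = my_irq_number;
 *	props.phys_addr = my_gsi_base_phys;
 *	props.size = my_gsi_reg_size;
 *	props.notify_cb = my_notify_cb;
 *	if (gsi_register_device(&props, &hdl))
 *		return -ENODEV;
 */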

int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	unsigned int max_usb_pkt_size = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (val->max_usb_pkt_size_valid &&
	    val->max_usb_pkt_size != 1024 &&
	    val->max_usb_pkt_size != 512 &&
	    val->max_usb_pkt_size != 64) {
		GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
			val->max_usb_pkt_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	if (val->mhi_base_chan_idx_valid)
		gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
			val->mhi_base_chan_idx;

	if (val->max_usb_pkt_size_valid) {
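		/*
		 * Encode the packet size for the scratch field. The
		 * mapping below follows from the code itself (not from
		 * a documented register description): 64B -> 2,
		 * 512B -> 0, 1024B -> 1.
		 */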
		max_usb_pkt_size = 2;
		if (val->max_usb_pkt_size > 64)
			max_usb_pkt_size =
				(val->max_usb_pkt_size == 1024) ? 1 : 0;
		gsi_ctx->scratch.word0.s.max_usb_pkt_size = max_usb_pkt_size;
	}

	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_device_scratch);

int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (!force && atomic_read(&gsi_ctx->num_chan)) {
		GSIERR("cannot deregister %u channels are still connected\n",
			atomic_read(&gsi_ctx->num_chan));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
		GSIERR("cannot deregister %u events are still connected\n",
			atomic_read(&gsi_ctx->num_evt_ring));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* disable all interrupts */
	__gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		__gsi_config_all_ch_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_all_evt_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_all_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	} else {
		__gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	}
	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);

	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
	gsihal_destroy();
	gsi_unmap_base();
	memset(gsi_ctx, 0, sizeof(*gsi_ctx));

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_deregister_device);

static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
		uint8_t evt_id, unsigned int ee)
{
	struct gsihal_reg_ev_ch_k_cntxt_0 ev_ch_k_cntxt_0;
	struct gsihal_reg_ev_ch_k_cntxt_1 ev_ch_k_cntxt_1;
	struct gsihal_reg_ev_ch_k_cntxt_2 ev_ch_k_cntxt_2;
	struct gsihal_reg_ev_ch_k_cntxt_3 ev_ch_k_cntxt_3;
	struct gsihal_reg_ev_ch_k_cntxt_8 ev_ch_k_cntxt_8;
	struct gsihal_reg_ev_ch_k_cntxt_9 ev_ch_k_cntxt_9;
	struct gsihal_reg_ev_ch_k_cntxt_10 ev_ch_k_cntxt_10;
	struct gsihal_reg_ev_ch_k_cntxt_11 ev_ch_k_cntxt_11;
	struct gsihal_reg_ev_ch_k_cntxt_12 ev_ch_k_cntxt_12;
	struct gsihal_reg_ev_ch_k_cntxt_13 ev_ch_k_cntxt_13;

	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
		props->re_size);

	ev_ch_k_cntxt_0.chtype = props->intf;
	ev_ch_k_cntxt_0.intype = props->intr;
	ev_ch_k_cntxt_0.element_size = props->re_size;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
		ee, evt_id, &ev_ch_k_cntxt_0);

	ev_ch_k_cntxt_1.r_length = props->ring_len;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_1,
		ee, evt_id, &ev_ch_k_cntxt_1);

	ev_ch_k_cntxt_2.r_base_addr_lsbs = GSI_LSB(props->ring_base_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_2,
		ee, evt_id, &ev_ch_k_cntxt_2);

	ev_ch_k_cntxt_3.r_base_addr_msbs = GSI_MSB(props->ring_base_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_3,
		ee, evt_id, &ev_ch_k_cntxt_3);

	ev_ch_k_cntxt_8.int_modt = props->int_modt;
	ev_ch_k_cntxt_8.int_modc = props->int_modc;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_8,
		ee, evt_id, &ev_ch_k_cntxt_8);

	ev_ch_k_cntxt_9.intvec = props->intvec;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_9,
		ee, evt_id, &ev_ch_k_cntxt_9);

	ev_ch_k_cntxt_10.msi_addr_lsb = GSI_LSB(props->msi_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_10,
		ee, evt_id, &ev_ch_k_cntxt_10);

	ev_ch_k_cntxt_11.msi_addr_msb = GSI_MSB(props->msi_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_11,
		ee, evt_id, &ev_ch_k_cntxt_11);

	ev_ch_k_cntxt_12.rp_update_addr_lsb = GSI_LSB(props->rp_update_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_12,
		ee, evt_id, &ev_ch_k_cntxt_12);

	ev_ch_k_cntxt_13.rp_update_addr_msb = GSI_MSB(props->rp_update_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_13,
		ee, evt_id, &ev_ch_k_cntxt_13);
}

static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
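	/*
	 * Ring geometry, e.g. (hypothetical numbers) a 4096-byte ring of
	 * 16-byte elements holds 256 slots; max_num_elem is then 255,
	 * one slot being kept unused, presumably so a completely full
	 * ring can be told apart from an empty one, and end is the
	 * first address past the ring.
	 */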
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
	if (props->rp_update_vaddr)
		*(uint64_t *)(props->rp_update_vaddr) = ctx->rp_local;
}

static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;
	struct gsihal_reg_gsi_ee_n_ev_ch_k_doorbell_1 db;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		ctx->ring.max_num_elem * ctx->ring.elem_sz;

	/* write order MUST be MSB followed by LSB */
	db.write_ptr_msb = GSI_MSB(ctx->ring.wp_local);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->id, &db);

	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static void gsi_prime_evt_ring_wdi(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	if (ctx->ring.base_va)
		memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		((ctx->ring.max_num_elem + 2) * ctx->ring.elem_sz);
	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
{
	uint64_t ra;

	if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
	     props->ring_len % 4) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_8B &&
	     props->ring_len % 8) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
	     props->ring_len % 16) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_32B &&
	     props->ring_len % 32)) {
		GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
			props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (!gsihal_check_ring_length_valid(props->ring_len, props->re_size))
		return -GSI_STATUS_INVALID_PARAMS;
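	/*
	 * The ring base must be aligned to the ring length rounded up
	 * to a power of two; e.g. (hypothetical numbers) a 0x3000-byte
	 * ring needs its base on a 0x4000-byte boundary. do_div() is
	 * used because this may be a 64-bit division on a 32-bit
	 * kernel.
	 */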
	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
			props->ring_base_addr,
			roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
	    (!props->evchid_valid ||
	     props->evchid > gsi_ctx->per.mhi_er_id_limits[1] ||
	     props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) {
		GSIERR("MHI requires evchid valid=%d val=%u\n",
			props->evchid_valid, props->evchid);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
	    props->evchid_valid) {
		GSIERR("protocol %u cannot specify evchid\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	return GSI_STATUS_SUCCESS;
}

/**
 * gsi_cleanup_xfer_user_data: clean up the user data array using the
 * callback passed in by the IPA driver. This must be done in GSI, since
 * only GSI knows which TREs are in use; the actual cleanup, however, is
 * done by IPA, so a callback is passed in from IPA and invoked here with
 * parameters supplied by GSI.
 *
 * @chan_hdl: hdl of the gsi channel whose user data array is to be cleaned
 * @cleanup_cb: callback used to clean the user data array. takes 2 inputs
 *	@chan_user_data: ipa_sys_context of the gsi_channel
 *	@xfer_user_data: user data array element (rx_pkt wrapper)
 *
 * Returns: 0 on success, negative on failure
 */
static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
		void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
{
	struct gsi_chan_ctx *ctx;
	uint64_t i;
	uint16_t rp_idx;

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* for coalescing, traverse the whole array */
	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		size_t user_data_size =
			ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
		for (i = 0; i < user_data_size; i++) {
			if (ctx->user_data[i].valid)
				cleanup_cb(ctx->props.chan_user_data,
					ctx->user_data[i].p);
		}
	} else {
		/* for non-coalescing, clean between RP and WP */
		while (ctx->ring.rp_local != ctx->ring.wp_local) {
			rp_idx = gsi_find_idx_from_addr(&ctx->ring,
				ctx->ring.rp_local);
			WARN_ON(!ctx->user_data[rp_idx].valid);
			cleanup_cb(ctx->props.chan_user_data,
				ctx->user_data[rp_idx].p);
			gsi_incr_ring_rp(&ctx->ring);
		}
	}
	return 0;
}

/**
 * gsi_read_event_ring_rp_ddr - returns the RP value of the event ring,
 * read from the rp_update address in DDR.
 *
 * @props: Props structure of the event channel
 * @id: Event channel index
 * @ee: EE
 *
 * @Return the value of the event ring read pointer
 */
static inline uint64_t gsi_read_event_ring_rp_ddr(struct gsi_evt_ring_props *props,
		uint8_t id, int ee)
{
	return readl_relaxed(props->rp_update_vaddr);
}

/**
 * gsi_read_event_ring_rp_reg - returns the RP value of the event ring,
 * read from the ring context register.
 *
 * @props: Props structure of the event channel
 * @id: Event channel index
 * @ee: EE
 *
 * @Return the value of the event ring read pointer
 */
static inline uint64_t gsi_read_event_ring_rp_reg(struct gsi_evt_ring_props *props,
		uint8_t id, int ee)
{
	return gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4, ee, id);
}

int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	unsigned long evt_id;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	struct gsi_evt_ctx *ctx;
	int res;
	int ee;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n",
			props, dev_hdl, evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (gsi_validate_evt_ring_props(props)) {
		GSIERR("invalid params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
			sizeof(unsigned long) * BITS_PER_BYTE);
		if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
			GSIERR("failed to alloc event ID\n");
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		set_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		evt_id = props->evchid;
	}
	GSIDBG("Using %lu as virt evt id\n", evt_id);

	if (props->rp_update_addr != 0) {
		GSIDBG("Using DDR to read event RP for virt evt id: %lu\n",
			evt_id);
		props->gsi_read_event_ring_rp =
			gsi_read_event_ring_rp_ddr;
	} else {
		GSIDBG("Using CONTEXT reg to read event RP for virt evt id: %lu\n",
			evt_id);
		props->gsi_read_event_ring_rp =
			gsi_read_event_ring_rp_reg;
	}

	ctx = &gsi_ctx->evtr[evt_id];
	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->chan_ref_cnt, 0);
	ctx->num_of_chan_allocated = 0;
	ctx->props = *props;

	mutex_lock(&gsi_ctx->mlock);
	ee = gsi_ctx->per.ee;
	ev_ch_cmd.opcode = op;
	ev_ch_cmd.chid = evt_id;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD, ee, &ev_ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_id);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu allocation failed state=%u\n",
			evt_id, ctx->state);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_evt_ring(props, &ctx->ring);

	ctx->id = evt_id;
	*evt_ring_hdl = evt_id;
	atomic_inc(&gsi_ctx->num_evt_ring);
	if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	else if (props->intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee,
			gsihal_get_ch_reg_idx(evt_id),
			gsihal_get_ch_reg_mask(evt_id));
	} else {
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR, ee,
			1 << evt_id);
	}

	/* enable ieob interrupts for GPI, enable MSI interrupts */
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
		    (props->intr != GSI_INTR_MSI))
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(evt_id),
				gsihal_get_ch_reg_mask(evt_id),
				0);
		else
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(evt_id),
				gsihal_get_ch_reg_mask(evt_id),
				~0);
	} else {
		if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
		    (props->intr != GSI_INTR_MSI))
			__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
		else
			__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
	}
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_evt_ring);
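
/*
 * Illustrative sketch (not part of the driver): allocating a GPI event
 * ring after gsi_register_device(). The DMA buffer, sizes and callback
 * names prefixed "my_" are hypothetical placeholders.
 *
 *	struct gsi_evt_ring_props eprops;
 *	unsigned long evt_hdl;
 *
 *	memset(&eprops, 0, sizeof(eprops));
 *	eprops.intf = GSI_EVT_CHTYPE_GPI_EV;
 *	eprops.intr = GSI_INTR_IRQ;
 *	eprops.re_size = GSI_EVT_RING_RE_SIZE_16B;
 *	eprops.ring_len = my_ring_elems * 16;
 *	eprops.ring_base_addr = my_ring_dma_addr;
 *	eprops.ring_base_vaddr = my_ring_vaddr;
 *	eprops.err_cb = my_evt_err_cb;
 *	if (gsi_alloc_evt_ring(&eprops, my_dev_hdl, &evt_hdl))
 *		return -ENODEV;
 */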

static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, evt_ring_hdl, val.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, evt_ring_hdl, val.data.word2);
}

int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_evt_ring_scratch(evt_ring_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_evt_ring_scratch);

int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (evt_ring_hdl >= gsi_ctx->max_ev ||
	    evt_ring_hdl >= GSI_EVT_RING_MAX) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (atomic_read(&ctx->chan_ref_cnt)) {
		GSIERR("%d channels still using this event ring\n",
			atomic_read(&ctx->chan_ref_cnt));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	ev_ch_cmd.chid = evt_ring_hdl;
	ev_ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD,
		gsi_ctx->per.ee, &ev_ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
			ctx->state);
		/*
		 * The IPA hardware did not return the ring to the
		 * not-allocated state, which is an unexpected hardware
		 * state.
		 */
		GSI_ASSERT();
	}
	mutex_unlock(&gsi_ctx->mlock);

	if (!ctx->props.evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	}
	atomic_dec(&gsi_ctx->num_evt_ring);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_evt_ring);

int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
			db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr + gsihal_get_reg_nk_ofst(
		GSI_EE_n_EV_CH_k_DOORBELL_0, gsi_ctx->per.ee, evt_ring_hdl);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr + gsihal_get_reg_nk_ofst(
		GSI_EE_n_EV_CH_k_DOORBELL_1, gsi_ctx->per.ee, evt_ring_hdl);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);

int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;
	gsi_ring_evt_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_evt_ring_db);

int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_STARTED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;

	/* write MSB first */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->props.ch_id, GSI_MSB(ctx->ring.wp_local));

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_ch_ring_db);

int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	ev_ch_cmd.chid = evt_ring_hdl;
	ev_ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD,
		gsi_ctx->per.ee, &ev_ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
			ctx->state);
		/*
		 * The IPA hardware did not return the ring to the
		 * allocated state, which is unexpected and indicates
		 * hardware instability.
		 */
		GSI_ASSERT();
	}

	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
	gsi_init_evt_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);

	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	if (ctx->props.intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_evt_ring);

int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_evt_ring_cfg);

int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!props || gsi_validate_evt_ring_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	if (ctx->props.exclusive != props->exclusive) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	mutex_unlock(&ctx->mlock);

	return gsi_reset_evt_ring(evt_ring_hdl);
}
EXPORT_SYMBOL(gsi_set_evt_ring_cfg);

static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props,
		unsigned int ee)
{
	struct gsihal_reg_gsi_ee_n_gsi_ch_k_qos ch_k_qos;

	ch_k_qos.wrr_weight = props->low_weight;
	ch_k_qos.max_prefetch = props->max_prefetch;
	ch_k_qos.use_db_eng = props->use_db_eng;

	if (gsi_ctx->per.ver >= GSI_VER_2_0) {
		if (gsi_ctx->per.ver < GSI_VER_2_5) {
			ch_k_qos.use_escape_buf_only = props->prefetch_mode;
		} else {
			ch_k_qos.prefetch_mode = props->prefetch_mode;
			ch_k_qos.empty_lvl_thrshold =
				props->empty_lvl_threshold;
			if (gsi_ctx->per.ver >= GSI_VER_2_9)
				ch_k_qos.db_in_bytes = props->db_in_bytes;
		}
	}
	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_QOS,
		ee, props->ch_id, &ch_k_qos);
}

static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
		uint8_t erindex)
{
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	struct gsihal_reg_ch_k_cntxt_1 ch_k_cntxt_1;

	switch (props->prot) {
	case GSI_CHAN_PROT_MHI:
	case GSI_CHAN_PROT_XHCI:
	case GSI_CHAN_PROT_GPI:
	case GSI_CHAN_PROT_XDCI:
	case GSI_CHAN_PROT_WDI2:
	case GSI_CHAN_PROT_WDI3:
	case GSI_CHAN_PROT_GCI:
	case GSI_CHAN_PROT_MHIP:
		ch_k_cntxt_0.chtype_protocol_msb = 0;
		break;
	case GSI_CHAN_PROT_AQC:
	case GSI_CHAN_PROT_11AD:
	case GSI_CHAN_PROT_RTK:
	case GSI_CHAN_PROT_QDSS:
		ch_k_cntxt_0.chtype_protocol_msb = 1;
		break;
	default:
		GSIERR("Unsupported protocol %d\n", props->prot);
		WARN_ON(1);
		return;
	}

	ch_k_cntxt_0.chtype_protocol = props->prot;
	ch_k_cntxt_0.chtype_dir = props->dir;
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		ch_k_cntxt_1.erindex = erindex;
	} else {
		ch_k_cntxt_0.erindex = erindex;
	}
	ch_k_cntxt_0.element_size = props->re_size;
	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
		ee, props->ch_id, &ch_k_cntxt_0);

	ch_k_cntxt_1.r_length = props->ring_len;
	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_1,
		ee, props->ch_id, &ch_k_cntxt_1);

	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		ee, props->ch_id, GSI_LSB(props->ring_base_addr));
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		ee, props->ch_id, GSI_MSB(props->ring_base_addr));

	gsi_program_chan_ctx_qos(props, ee);
}

static void gsi_init_chan_ring(struct gsi_chan_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) *
		ctx->elem_sz;
}

static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
	uint64_t ra;
	uint64_t last;

	if (props->ch_id >= gsi_ctx->max_ch) {
		GSIERR("ch_id %u invalid\n", props->ch_id);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
	     props->ring_len % 4) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_8B &&
	     props->ring_len % 8) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_16B &&
	     props->ring_len % 16) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_32B &&
	     props->ring_len % 32) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_64B &&
	     props->ring_len % 64)) {
		GSIERR("bad params ring_len %u not a multiple of re size %u\n",
			props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (!gsihal_check_ring_length_valid(props->ring_len, props->re_size))
		return -GSI_STATUS_INVALID_PARAMS;
	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
			props->ring_base_addr,
			roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}
	last = props->ring_base_addr + props->ring_len - props->re_size;
	/* the upper 32 address bits must stay the same across the ring */
	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
	    (last & 0xFFFFFFFF00000000ULL)) {
		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
			props->ring_base_addr,
			props->ring_len);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->prot == GSI_CHAN_PROT_GPI &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->prot);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
		GSIERR("invalid channel low weight %u\n", props->low_weight);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
		GSIERR("xfer callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	return GSI_STATUS_SUCCESS;
}

int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
	uint8_t erindex;
	struct gsi_user_data *user_data;
	size_t user_data_size;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n",
			props, dev_hdl, chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (gsi_validate_channel_props(props)) {
		GSIERR("bad params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (props->evt_ring_hdl != ~0) {
		if (props->evt_ring_hdl >= gsi_ctx->max_ev) {
			GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl);
			return -GSI_STATUS_INVALID_PARAMS;
		}
		if (atomic_read(
			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
		    gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
		    gsi_ctx->evtr[props->evt_ring_hdl].chan[0]->props.prot !=
			GSI_CHAN_PROT_GCI) {
			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
				props->evt_ring_hdl, chan_hdl);
			return -GSI_STATUS_UNSUPPORTED_OP;
		}
	}

	ctx = &gsi_ctx->chan[props->ch_id];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", props->ch_id);
		return -GSI_STATUS_NODEV;
	}
	memset(ctx, 0, sizeof(*ctx));
	/* IPA-offloaded WDI channels do not need a full user_data array */
	if (props->prot != GSI_CHAN_PROT_WDI2 &&
	    props->prot != GSI_CHAN_PROT_WDI3)
		user_data_size = props->ring_len / props->re_size;
	else
		user_data_size = props->re_size;

	/*
	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
	 * user_data needs to be large enough to accommodate those.
	 * TODO: increase user data size if GSI_VEID_MAX is not enough
	 */
	if (props->prot == GSI_CHAN_PROT_GCI)
		user_data_size += GSI_VEID_MAX;

	user_data = devm_kzalloc(gsi_ctx->dev,
		user_data_size * sizeof(*user_data),
		GFP_KERNEL);
	if (user_data == NULL) {
		GSIERR("context not allocated\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
	ctx->props = *props;

	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;

		mutex_lock(&gsi_ctx->mlock);
		ee = gsi_ctx->per.ee;
		gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
		ch_cmd.chid = props->ch_id;
		ch_cmd.opcode = op;
		gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD, ee, &ch_cmd);
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%u timed out\n", props->ch_id);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
			GSIERR("chan_hdl=%u allocation failed state=%d\n",
				props->ch_id, ctx->state);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		ctx->state = GSI_CHAN_STATE_ALLOCATED;
		mutex_unlock(&gsi_ctx->mlock);
	}

	erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
		GSI_NO_EVT_ERINDEX;
	if (erindex != GSI_NO_EVT_ERINDEX && erindex >= GSI_EVT_RING_MAX) {
		GSIERR("invalid erindex %u\n", erindex);
		devm_kfree(gsi_ctx->dev, user_data);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (erindex < GSI_EVT_RING_MAX) {
		ctx->evtr = &gsi_ctx->evtr[erindex];
		if (ctx->evtr->num_of_chan_allocated >=
		    MAX_CHANNELS_SHARING_EVENT_RING) {
			GSIERR("too many channels sharing the same event ring %u\n",
				erindex);
			GSI_ASSERT();
		}
		if (props->prot != GSI_CHAN_PROT_GCI) {
			atomic_inc(&ctx->evtr->chan_ref_cnt);
			if (ctx->evtr->props.exclusive) {
				if (atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
					ctx->evtr->chan
						[ctx->evtr->num_of_chan_allocated++] = ctx;
			} else {
				ctx->evtr->chan[ctx->evtr->num_of_chan_allocated++]
					= ctx;
			}
		}
	}
	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_chan_ring(props, &ctx->ring);
	if (!props->max_re_expected)
		ctx->props.max_re_expected = ctx->ring.max_num_elem;
	ctx->user_data = user_data;
	*chan_hdl = props->ch_id;
	ctx->allocated = true;
	ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
	atomic_inc(&gsi_ctx->num_chan);

	if (props->prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = props->ch_id;
		gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
	}
	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_channel);
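
/*
 * Illustrative sketch (not part of the driver): allocating a GPI channel
 * bound to a previously allocated event ring. The ring buffer, channel
 * ID and callback names prefixed "my_" are hypothetical placeholders.
 *
 *	struct gsi_chan_props cprops;
 *	unsigned long chan_hdl;
 *
 *	memset(&cprops, 0, sizeof(cprops));
 *	cprops.prot = GSI_CHAN_PROT_GPI;
 *	cprops.dir = GSI_CHAN_DIR_TO_GSI;
 *	cprops.ch_id = my_channel_id;
 *	cprops.evt_ring_hdl = evt_hdl;
 *	cprops.re_size = GSI_CHAN_RE_SIZE_16B;
 *	cprops.ring_len = my_ring_elems * 16;
 *	cprops.ring_base_addr = my_ring_dma_addr;
 *	cprops.ring_base_vaddr = my_ring_vaddr;
 *	cprops.xfer_cb = my_xfer_cb;
 *	cprops.err_cb = my_chan_err_cb;
 *	if (gsi_alloc_channel(&cprops, my_dev_hdl, &chan_hdl))
 *		return -ENODEV;
 */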

static int gsi_alloc_ap_channel(unsigned int chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", chan_hdl);
		return -GSI_STATUS_NODEV;
	}
	memset(ctx, 0, sizeof(*ctx));

	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);

	mutex_lock(&gsi_ctx->mlock);
	ee = gsi_ctx->per.ee;
	gsi_ctx->ch_dbg[chan_hdl].ch_allocate++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD, ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%u timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%u allocation failed state=%d\n",
			chan_hdl, ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}

static void __gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl, val.data.word2);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word3);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, val.data.word4);
}

static void __gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
}

int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
		union __packed gsi_wdi_channel_scratch3_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.wdi.endp_metadatareg_offset =
		val.wdi.endp_metadatareg_offset;
	ctx->scratch.wdi.qmap_id = val.wdi.qmap_id;
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);

int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi2_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.wdi2_new.endp_metadatareg_offset =
		val.wdi.endp_metadatareg_offset;
	ctx->scratch.wdi2_new.qmap_id = val.wdi.qmap_id;
	val.wdi.update_ri_moderation_threshold =
		ctx->scratch.wdi2_new.update_ri_moderation_threshold;
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch2_reg);

static void __gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *val)
{
	val->data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word2 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word3 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word4 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
}

static void __gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg *val)
{
	val->data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
}

int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch);
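
/*
 * Note: the scratch value is cached in the channel context and mirrored
 * to the four HW scratch registers under the channel mutex. Illustrative
 * sketch (not part of the driver) of writing raw scratch words; the
 * values are hypothetical placeholders:
 *
 *	union __packed gsi_channel_scratch scr;
 *
 *	memset(&scr, 0, sizeof(scr));
 *	scr.data.word1 = 0x1;
 *	scr.data.word2 = 0x2;
 *	if (gsi_write_channel_scratch(chan_hdl, scr))
 *		return -EIO;
 */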

int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.data.word3 = val.data.word1;
	__gsi_write_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_wdi3_channel_scratch2_reg);
  2485. int gsi_read_channel_scratch(unsigned long chan_hdl,
  2486. union __packed gsi_channel_scratch *val)
  2487. {
  2488. struct gsi_chan_ctx *ctx;
  2489. if (!gsi_ctx) {
  2490. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  2491. return -GSI_STATUS_NODEV;
  2492. }
  2493. if (chan_hdl >= gsi_ctx->max_ch) {
  2494. GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
  2495. return -GSI_STATUS_INVALID_PARAMS;
  2496. }
  2497. if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
  2498. gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
  2499. gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
  2500. GSIERR("bad state %d\n",
  2501. gsi_ctx->chan[chan_hdl].state);
  2502. return -GSI_STATUS_UNSUPPORTED_OP;
  2503. }
  2504. ctx = &gsi_ctx->chan[chan_hdl];
  2505. mutex_lock(&ctx->mlock);
  2506. __gsi_read_channel_scratch(chan_hdl, val);
  2507. mutex_unlock(&ctx->mlock);
  2508. return GSI_STATUS_SUCCESS;
  2509. }
  2510. EXPORT_SYMBOL(gsi_read_channel_scratch);
  2511. int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
  2512. union __packed gsi_wdi3_channel_scratch2_reg * val)
  2513. {
  2514. struct gsi_chan_ctx *ctx;
  2515. if (!gsi_ctx) {
  2516. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  2517. return -GSI_STATUS_NODEV;
  2518. }
  2519. if (chan_hdl >= gsi_ctx->max_ch) {
  2520. GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
  2521. return -GSI_STATUS_INVALID_PARAMS;
  2522. }
  2523. if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
  2524. gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
  2525. gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
  2526. GSIERR("bad state %d\n",
  2527. gsi_ctx->chan[chan_hdl].state);
  2528. return -GSI_STATUS_UNSUPPORTED_OP;
  2529. }
  2530. ctx = &gsi_ctx->chan[chan_hdl];
  2531. mutex_lock(&ctx->mlock);
  2532. __gsi_read_wdi3_channel_scratch2_reg(chan_hdl, val);
  2533. mutex_unlock(&ctx->mlock);
  2534. return GSI_STATUS_SUCCESS;
  2535. }
  2536. EXPORT_SYMBOL(gsi_read_wdi3_channel_scratch2_reg);
  2537. int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
  2538. struct __packed gsi_mhi_channel_scratch mscr)
  2539. {
  2540. struct gsi_chan_ctx *ctx;
  2541. if (!gsi_ctx) {
  2542. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  2543. return -GSI_STATUS_NODEV;
  2544. }
  2545. if (chan_hdl >= gsi_ctx->max_ch) {
  2546. GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
  2547. return -GSI_STATUS_INVALID_PARAMS;
  2548. }
  2549. if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
  2550. gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
  2551. GSIERR("bad state %d\n",
  2552. gsi_ctx->chan[chan_hdl].state);
  2553. return -GSI_STATUS_UNSUPPORTED_OP;
  2554. }
  2555. ctx = &gsi_ctx->chan[chan_hdl];
  2556. mutex_lock(&ctx->mlock);
  2557. ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr);
  2558. mutex_unlock(&ctx->mlock);
  2559. return GSI_STATUS_SUCCESS;
  2560. }
  2561. EXPORT_SYMBOL(gsi_update_mhi_channel_scratch);
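/*
 * gsi_query_channel_db_addr() - report the physical addresses of the
 * channel doorbell registers (DOORBELL_0 holds the WP LSB, DOORBELL_1 the
 * MSB), e.g. so a peripheral can ring the doorbell directly.
 */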
int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
			db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_DOORBELL_0,
			gsi_ctx->per.ee, chan_hdl);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_DOORBELL_1,
			gsi_ctx->per.ee, chan_hdl);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_db_addr);

int gsi_pending_irq_type(void)
{
	int ee = gsi_ctx->per.ee;

	return gsihal_read_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ, ee);
}
EXPORT_SYMBOL(gsi_pending_irq_type);
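/*
 * gsi_start_channel() - issue GSI_CH_START and wait for the channel to
 * reach the STARTED (or FLOW_CONTROL) state; a timeout or any other
 * resulting state is treated as fatal.
 */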
int gsi_start_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_START;
	uint32_t val;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
	    ctx->state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsihal_read_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee);
	if (ctx->evtr &&
	    ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
	    val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);

	GSIDBG("GSI Channel Start, waiting for completion\n");
	gsi_channel_state_change_wait(chan_hdl, ctx,
		GSI_START_CMD_TIMEOUT_MS, op);
	if (ctx->state != GSI_CHAN_STATE_STARTED &&
	    ctx->state != GSI_CHAN_STATE_FLOW_CONTROL) {
		/* Hardware returned an unexpected state; treat as fatal. */
		GSIERR("chan=%lu timed out, unexpected state=%u\n",
			chan_hdl, ctx->state);
		gsi_dump_ch_info(chan_hdl);
		GSI_ASSERT();
	}

	GSIDBG("GSI Channel=%lu Start success\n", chan_hdl);

	/* write order MUST be MSB followed by LSB */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->props.ch_id, GSI_MSB(ctx->ring.wp_local));
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_channel);

void gsi_dump_ch_info(unsigned long chan_hdl)
{
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIDBG("invalid chan id %lu\n", chan_hdl);
		return;
	}

	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX0 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX1 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX2 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX3 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX4 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX5 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX6 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX7 0x%x\n", chan_hdl, val);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_8,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu CTX8 0x%x\n", chan_hdl, val);
	}
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu REFRP 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu REFWP 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_QOS,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu QOS 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR0 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR1 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR2 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR3 0x%x\n", chan_hdl, val);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_4,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR4 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_5,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR5 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_6,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR6 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_7,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR7 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_8,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR8 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_9,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR9 0x%x\n", chan_hdl, val);
	}
}
EXPORT_SYMBOL(gsi_dump_ch_info);
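/*
 * gsi_stop_channel() - issue GSI_CH_STOP. Returns success if the channel
 * is already stopped, and -GSI_STATUS_AGAIN if the stop is still in
 * progress and the caller should retry.
 */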
int gsi_stop_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
	int res;
	uint32_t val;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}
	if (ctx->state != GSI_CHAN_STATE_STARTED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
	    ctx->state != GSI_CHAN_STATE_ERROR) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsihal_read_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee);
	if (ctx->evtr &&
	    ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
	    val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);

	GSIDBG("GSI Channel Stop, waiting for completion: 0x%x\n", val);
	gsi_channel_state_change_wait(chan_hdl, ctx,
		GSI_STOP_CMD_TIMEOUT_MS, op);

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		gsi_dump_ch_info(chan_hdl);
		res = -GSI_STATUS_BAD_STATE;
		BUG();
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_channel);

int gsi_stop_db_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}
	if (ctx->state != GSI_CHAN_STATE_STARTED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl,
		msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_db_channel);
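/*
 * gsi_reset_channel() - reset a STOPPED (or, for WDI3, still-ALLOCATED)
 * channel, re-program its context and ring, run the cleanup callback if
 * one is registered, and restore the cached scratch area.
 */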
int gsi_reset_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;
	bool reset_done = false;
	uint32_t retry_cnt = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/*
	 * In the WDI3 case, if SAP is enabled but no client is connected,
	 * GSI stays in the ALLOCATED state. When SAP is disabled,
	 * gsi_reset_channel() is called and the reset is still needed.
	 */
	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
	    ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);

reset:
	reinit_completion(&ctx->compl);
	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

revrfy_chnlstate:
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
			ctx->state);
		/*
		 * The channel state in the GSI registers may not yet be in
		 * sync with the channel context state; wait up to 1 ms for
		 * them to converge before giving up.
		 */
		retry_cnt++;
		if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
			usleep_range(GSI_RESET_WA_MIN_SLEEP,
				GSI_RESET_WA_MAX_SLEEP);
			goto revrfy_chnlstate;
		}
		/* Hardware returned an unexpected state; treat as fatal. */
		GSI_ASSERT();
	}

	/* The hardware issue is fixed from GSI 2.0 on, so the WA is not needed */
	if (gsi_ctx->per.ver >= GSI_VER_2_0)
		reset_done = true;

	/* workaround: reset GSI producers again */
	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
		reset_done = true;
		goto reset;
	}

	if (ctx->props.cleanup_cb)
		gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);

	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_channel);
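/*
 * gsi_dealloc_channel() - issue GSI_CH_DE_ALLOC and release the channel's
 * bookkeeping. GSI_VER_2_2 lacks the command, so there the context is
 * simply marked NOT_ALLOCATED.
 */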
int gsi_dealloc_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* Channel deallocation is not supported on GSI_VER_2_2 */
	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		mutex_lock(&gsi_ctx->mlock);
		reinit_completion(&ctx->compl);
		gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
		ch_cmd.chid = chan_hdl;
		ch_cmd.opcode = op;
		gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
			gsi_ctx->per.ee, &ch_cmd);
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
			GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
				ctx->state);
			/* Hardware returned an incorrect value */
			GSI_ASSERT();
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		GSIDBG("channel deallocation not supported on GSI_VER_2_2\n");
		ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED;
		GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl,
			ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
	}
	devm_kfree(gsi_ctx->dev, ctx->user_data);
	ctx->allocated = false;
	if (ctx->evtr && (ctx->props.prot != GSI_CHAN_PROT_GCI)) {
		atomic_dec(&ctx->evtr->chan_ref_cnt);
		ctx->evtr->num_of_chan_allocated--;
	}
	atomic_dec(&gsi_ctx->num_chan);

	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
		gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_channel);

void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
{
	unsigned long now = jiffies_to_msecs(jiffies);
	unsigned long elapsed;

	if (used == 0) {
		elapsed = now - ctx->stats.dp.last_timestamp;
		if (ctx->stats.dp.empty_time < elapsed)
			ctx->stats.dp.empty_time = elapsed;
	}

	if (used <= ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_lo;
	else if (used <= 2 * ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_hi;
	else
		++ctx->stats.dp.ch_above_hi;
	ctx->stats.dp.last_timestamp = now;
}
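/*
 * Number of free ring elements: the distance from the (possibly refreshed)
 * read pointer to the local write pointer, modulo the ring size, subtracted
 * from the ring capacity.
 */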
static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
		uint16_t *num_free_re)
{
	uint16_t start;
	uint16_t end;
	uint64_t rp;
	int ee = gsi_ctx->per.ee;
	uint16_t used;

	WARN_ON(ctx->props.prot != GSI_CHAN_PROT_GPI);

	if (!ctx->evtr) {
		rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
			ee, ctx->props.ch_id);
		rp |= ctx->ring.rp & GSI_MSB_MASK;
		ctx->ring.rp = rp;
	} else {
		rp = ctx->ring.rp_local;
	}

	start = gsi_find_idx_from_addr(&ctx->ring, rp);
	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	if (end >= start)
		used = end - start;
	else
		used = ctx->ring.max_num_elem + 1 - (start - end);

	*num_free_re = ctx->ring.max_num_elem - used;
}

int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch || !info) {
		GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->evtr) {
		slock = &ctx->evtr->ring.slock;
		info->evt_valid = true;
	} else {
		slock = &ctx->ring.slock;
		info->evt_valid = false;
	}

	spin_lock_irqsave(slock, flags);

	ee = gsi_ctx->per.ee;
	rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		ee, ctx->props.ch_id);
	rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		ee, ctx->props.ch_id)) << 32;
	ctx->ring.rp = rp;
	info->rp = rp;

	wp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		ee, ctx->props.ch_id);
	wp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		ee, ctx->props.ch_id)) << 32;
	ctx->ring.wp = wp;
	info->wp = wp;

	if (info->evt_valid) {
		rp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4,
			ee, ctx->evtr->id);
		rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_5,
			ee, ctx->evtr->id)) << 32;
		info->evt_rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_6,
			ee, ctx->evtr->id);
		wp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_7,
			ee, ctx->evtr->id)) << 32;
		info->evt_wp = wp;
	}

	spin_unlock_irqrestore(slock, flags);

	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
		chan_hdl, info->rp, info->wp,
		info->evt_valid, info->evt_rp, info->evt_wp);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_info);

int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	uint64_t rp_local;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
		GSIERR("bad params chan_hdl=%lu is_empty=%pK\n",
			chan_hdl, is_empty);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr) {
		rp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4,
			ee, ctx->evtr->id);
		rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
		ctx->evtr->ring.rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_6,
			ee, ctx->evtr->id);
		wp |= ctx->evtr->ring.wp & GSI_MSB_MASK;
		ctx->evtr->ring.wp = wp;
		rp_local = ctx->evtr->ring.rp_local;
	} else {
		rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
			ee, ctx->props.ch_id);
		rp |= ctx->ring.rp & GSI_MSB_MASK;
		ctx->ring.rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
			ee, ctx->props.ch_id);
		wp |= ctx->ring.wp & GSI_MSB_MASK;
		ctx->ring.wp = wp;
		rp_local = ctx->ring.rp_local;
	}

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		*is_empty = (rp_local == rp);
	else
		*is_empty = (wp == rp);

	spin_unlock_irqrestore(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr)
		GSIDBG("ch=%lu ev=%d RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, ctx->evtr->id, rp, wp, rp_local);
	else
		GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, rp, wp, rp_local);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_is_channel_empty);

bool gsi_is_event_pending(unsigned long chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	uint64_t rp;
	uint64_t rp_local;
	int ee;

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return false;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	/* read only, updating will be handled in NAPI context if needed */
	rp = ctx->evtr->props.gsi_read_event_ring_rp(
		&ctx->evtr->props, ctx->evtr->id, ee);
	rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
	rp_local = ctx->evtr->ring.rp_local;

	return rp != rp_local;
}
EXPORT_SYMBOL(gsi_is_event_pending);
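/*
 * Find a free userdata slot ("cookie") for a GCI TRE. The slot matching
 * the TRE index is preferred; if it is still in use, fall back to the
 * GSI_VEID_MAX-sized escape buffer past the ring and, as a last resort,
 * scan the original array.
 */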
int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
{
	int i;
	int end;

	if (!ctx->user_data[idx].valid) {
		ctx->user_data[idx].valid = true;
		return idx;
	}

	/*
	 * At this point we need to find an "escape buffer" slot for the
	 * cookie, as the userdata in this spot is still in use. This happens
	 * when the TRE at idx has not completed yet and is being reused by a
	 * new TRE.
	 */
	ctx->stats.userdata_in_use++;
	end = ctx->ring.max_num_elem + 1;
	for (i = 0; i < GSI_VEID_MAX; i++) {
		if (!ctx->user_data[end + i].valid) {
			ctx->user_data[end + i].valid = true;
			return end + i;
		}
	}

	/* Go over the original userdata when the escape buffer is full (costly) */
	GSIDBG("escape buffer is full\n");
	for (i = 0; i < end; i++) {
		if (!ctx->user_data[i].valid) {
			ctx->user_data[i].valid = true;
			return i;
		}
	}

	/* Everything is full (possibly a stall) */
	GSIERR("both userdata array and escape buffer are full\n");
	BUG();
	return 0xFFFF;
}

int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
		struct gsi_xfer_elem *xfer)
{
	struct gsi_gci_tre gci_tre;
	struct gsi_gci_tre *tre_gci_ptr;
	uint16_t idx;

	memset(&gci_tre, 0, sizeof(gci_tre));
	if (xfer->addr & 0xFFFFFF0000000000) {
		GSIERR("chan_hdl=%u addr too large=%llx\n",
			ctx->props.ch_id, xfer->addr);
		return -EINVAL;
	}
	if (xfer->type != GSI_XFER_ELEM_DATA) {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_gci_ptr = (struct gsi_gci_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	gci_tre.buffer_ptr = xfer->addr;
	gci_tre.buf_len = xfer->len;
	gci_tre.re_type = GSI_RE_COAL;
	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
		return -EPERM;

	/* write the TRE to the ring */
	*tre_gci_ptr = gci_tre;
	ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;

	return 0;
}

int __gsi_populate_tre(struct gsi_chan_ctx *ctx,
		struct gsi_xfer_elem *xfer)
{
	struct gsi_tre tre;
	struct gsi_tre *tre_ptr;
	uint16_t idx;

	memset(&tre, 0, sizeof(tre));
	tre.buffer_ptr = xfer->addr;
	tre.buf_len = xfer->len;
	if (xfer->type == GSI_XFER_ELEM_DATA) {
		tre.re_type = GSI_RE_XFER;
	} else if (xfer->type == GSI_XFER_ELEM_IMME_CMD) {
		tre.re_type = GSI_RE_IMMD_CMD;
	} else if (xfer->type == GSI_XFER_ELEM_NOP) {
		tre.re_type = GSI_RE_NOP;
	} else {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	tre.bei = (xfer->flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
	tre.ieot = (xfer->flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
	tre.ieob = (xfer->flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
	tre.chain = (xfer->flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;

	if (unlikely(ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	/* write the TRE to the ring */
	*tre_ptr = tre;
	ctx->user_data[idx].valid = true;
	ctx->user_data[idx].p = xfer->xfer_user_data;

	return 0;
}
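/*
 * gsi_queue_xfer() - populate num_xfers TREs on the channel ring and
 * optionally ring the doorbell. If any element fails to populate, the
 * local write pointer is rolled back and the whole batch is rejected.
 */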
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	struct gsi_chan_ctx *ctx;
	uint16_t free;
	uint64_t wp_rollback;
	int i;
	spinlock_t *slock;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
			chan_hdl, num_xfers, xfer);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (unlikely(gsi_ctx->chan[chan_hdl].state
			== GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	/* no xfers to queue; only ring the doorbell */
	if (!num_xfers)
		goto ring_doorbell;

	/*
	 * For GCI channels it is the caller's responsibility to make sure
	 * there is enough room in the TRE ring.
	 */
	if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
		__gsi_query_channel_free_re(ctx, &free);
		if (num_xfers > free) {
			GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
				chan_hdl, num_xfers, free);
			spin_unlock_irqrestore(slock, flags);
			return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
		}
	}

	wp_rollback = ctx->ring.wp_local;
	for (i = 0; i < num_xfers; i++) {
		if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
			if (__gsi_populate_gci_tre(ctx, &xfer[i]))
				break;
		} else {
			if (__gsi_populate_tre(ctx, &xfer[i]))
				break;
		}
		gsi_incr_ring_wp(&ctx->ring);
	}

	if (i != num_xfers) {
		/* reject all the xfers */
		ctx->ring.wp_local = wp_rollback;
		spin_unlock_irqrestore(slock, flags);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx->stats.queued += num_xfers;

ring_doorbell:
	if (ring_db) {
		/* ensure TRE is set before ringing doorbell */
		wmb();
		gsi_ring_chan_doorbell(ctx);
	}

	spin_unlock_irqrestore(slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_queue_xfer);

int gsi_start_xfer(unsigned long chan_hdl)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	if (ctx->ring.wp == ctx->ring.wp_local)
		return GSI_STATUS_SUCCESS;

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_xfer);

int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify)
{
	int unused_var;

	return gsi_poll_n_channel(chan_hdl, notify, 1, &unused_var);
}
EXPORT_SYMBOL(gsi_poll_channel);
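/*
 * gsi_poll_n_channel() - poll up to expected_num completed events from the
 * channel's event ring. The hardware read pointer is re-read (with the
 * IEOB interrupt cleared in between) before the ring is declared empty.
 */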
int gsi_poll_n_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify,
		int expected_num, int *actual_num)
{
	struct gsi_chan_ctx *ctx;
	uint64_t rp;
	int ee;
	int i;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch || !notify ||
	    !actual_num || expected_num <= 0) {
		GSIERR("bad params chan_hdl=%lu notify=%pK\n",
			chan_hdl, notify);
		GSIERR("actual_num=%pK expected_num=%d\n",
			actual_num, expected_num);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* make sure the channel has been allocated before polling */
	if (unlikely(ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	if (!ctx->evtr) {
		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
		/* update rp to see if we have anything new to process */
		rp = ctx->evtr->props.gsi_read_event_ring_rp(
			&ctx->evtr->props, ctx->evtr->id, ee);
		rp |= ctx->ring.rp & GSI_MSB_MASK;
		ctx->evtr->ring.rp = rp;

		/* read the event ring rp again if the last read was empty */
		if (rp == ctx->evtr->ring.rp_local) {
			/* event ring is empty */
			if (gsi_ctx->per.ver >= GSI_VER_3_0) {
				gsihal_write_reg_nk(
					GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
					ee,
					gsihal_get_ch_reg_idx(ctx->evtr->id),
					gsihal_get_ch_reg_mask(ctx->evtr->id));
			} else {
				gsihal_write_reg_n(
					GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
					ee, 1 << ctx->evtr->id);
			}
			/* do another read to close a small window */
			__iowmb();
			rp = ctx->evtr->props.gsi_read_event_ring_rp(
				&ctx->evtr->props, ctx->evtr->id, ee);
			rp |= ctx->ring.rp & GSI_MSB_MASK;
			ctx->evtr->ring.rp = rp;
			if (rp == ctx->evtr->ring.rp_local) {
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock, flags);
				ctx->stats.poll_empty++;
				return GSI_STATUS_POLL_EMPTY;
			}
		}
	}

	*actual_num = gsi_get_complete_num(&ctx->evtr->ring,
		ctx->evtr->ring.rp_local, ctx->evtr->ring.rp);
	if (*actual_num > expected_num)
		*actual_num = expected_num;

	for (i = 0; i < *actual_num; i++)
		gsi_process_evt_re(ctx->evtr, notify + i, false);

	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
	ctx->stats.poll_ok++;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_poll_n_channel);
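/*
 * gsi_config_channel_mode() - switch a channel, and every channel sharing
 * its event ring, between callback (IRQ) and poll mode by masking or
 * unmasking the IEOB interrupt. Includes the GSI 2.2/2.5 workaround for
 * an IEOB interrupt that may be lost while re-enabling.
 */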
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
{
	struct gsi_chan_ctx *ctx, *coal_ctx;
	enum gsi_chan_mode curr;
	unsigned long flags;
	enum gsi_chan_mode chan_mode;
	int i;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	if (!ctx->evtr) {
		GSIERR("cannot configure mode on chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (atomic_read(&ctx->poll_mode))
		curr = GSI_CHAN_MODE_POLL;
	else
		curr = GSI_CHAN_MODE_CALLBACK;

	if (mode == curr) {
		GSIDBG("already in requested mode %u chan_hdl=%lu\n",
			curr, chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	if (curr == GSI_CHAN_MODE_CALLBACK &&
	    mode == GSI_CHAN_MODE_POLL) {
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(ctx->evtr->id),
				gsihal_get_ch_reg_mask(ctx->evtr->id),
				0);
		} else {
			__gsi_config_ieob_irq(gsi_ctx->per.ee,
				1 << ctx->evtr->id, 0);
		}
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
				gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(ctx->evtr->id),
				gsihal_get_ch_reg_mask(ctx->evtr->id));
		} else {
			gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
				gsi_ctx->per.ee, 1 << ctx->evtr->id);
		}
		atomic_set(&ctx->poll_mode, mode);
		for (i = 0; i < ctx->evtr->num_of_chan_allocated; i++)
			atomic_set(&ctx->evtr->chan[i]->poll_mode, mode);
		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) &&
		    *ctx->evtr->chan) {
			atomic_set(&ctx->evtr->chan[0]->poll_mode, mode);
		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
			if (coal_ctx != NULL)
				atomic_set(&coal_ctx->poll_mode, mode);
		}
		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
			ctx->evtr->id, mode);
		ctx->stats.callback_to_poll++;
	}

	if (curr == GSI_CHAN_MODE_POLL &&
	    mode == GSI_CHAN_MODE_CALLBACK) {
		atomic_set(&ctx->poll_mode, mode);
		for (i = 0; i < ctx->evtr->num_of_chan_allocated; i++)
			atomic_set(&ctx->evtr->chan[i]->poll_mode, mode);
		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) &&
		    *ctx->evtr->chan) {
			atomic_set(&ctx->evtr->chan[0]->poll_mode, mode);
		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
			if (coal_ctx != NULL)
				atomic_set(&coal_ctx->poll_mode, mode);
		}
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(ctx->evtr->id),
				gsihal_get_ch_reg_mask(ctx->evtr->id),
				~0);
		} else {
			__gsi_config_ieob_irq(gsi_ctx->per.ee,
				1 << ctx->evtr->id, ~0);
		}
		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
			ctx->evtr->id, mode);

		/*
		 * In GSI 2.2 and 2.5 there is a limitation that can lead
		 * to losing an interrupt. For these versions an explicit
		 * check is needed after enabling the interrupt.
		 */
		if ((gsi_ctx->per.ver == GSI_VER_2_2 ||
		     gsi_ctx->per.ver == GSI_VER_2_5) &&
		    !gsi_ctx->per.skip_ieob_mask_wa) {
			u32 src = gsihal_read_reg_n(
				GSI_EE_n_CNTXT_SRC_IEOB_IRQ,
				gsi_ctx->per.ee);
			if (src & (1 << ctx->evtr->id)) {
				if (gsi_ctx->per.ver >= GSI_VER_3_0) {
					__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
						gsihal_get_ch_reg_idx(ctx->evtr->id),
						gsihal_get_ch_reg_mask(ctx->evtr->id),
						0);
					gsihal_write_reg_nk(
						GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
						gsi_ctx->per.ee,
						gsihal_get_ch_reg_idx(ctx->evtr->id),
						gsihal_get_ch_reg_mask(ctx->evtr->id));
				} else {
					__gsi_config_ieob_irq(gsi_ctx->per.ee,
						1 << ctx->evtr->id, 0);
					gsihal_write_reg_n(
						GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
						gsi_ctx->per.ee,
						1 << ctx->evtr->id);
				}
				spin_unlock_irqrestore(&gsi_ctx->slock, flags);
				spin_lock_irqsave(&ctx->evtr->ring.slock,
					flags);
				chan_mode = atomic_xchg(&ctx->poll_mode,
					GSI_CHAN_MODE_POLL);
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock, flags);
				ctx->stats.poll_pending_irq++;
				GSIDBG("IEOB WA pnd cnt = %ld prvmode = %d\n",
					ctx->stats.poll_pending_irq,
					chan_mode);
				if (chan_mode == GSI_CHAN_MODE_POLL)
					return GSI_STATUS_SUCCESS;
				else
					return -GSI_STATUS_PENDING_IRQ;
			}
		}
		ctx->stats.poll_to_callback++;
	}
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_config_channel_mode);

int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_channel_cfg);

int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!props || gsi_validate_channel_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	if (ctx->props.ch_id != props->ch_id ||
	    ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_set_channel_cfg);

static void gsi_configure_ieps(enum gsi_ver ver)
{
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_CMD, 1);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_DB, 2);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_DIS_COMP, 3);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_EMPTY, 4);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EE_GENERIC_CMD, 5);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EVENT_GEN_COMP, 6);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_INT_MOD_STOPPED, 7);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0, 8);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2, 9);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1, 10);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_NEW_RE, 11);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_READ_ENG_COMP, 12);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_TIMER_EXPIRED, 13);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EV_DB, 14);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_UC_GP_INT, 15);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_WRITE_ENG_COMP, 16);

	if (ver >= GSI_VER_2_5)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_TLV_CH_NOT_FULL, 17);
	if (ver >= GSI_VER_2_11)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_MSI_DB, 18);
	if (ver >= GSI_VER_3_0)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_INT_NOTIFY_MCS, 19);
}

static void gsi_configure_bck_prs_matrix(void)
{
	/*
	 * For now, these are default values. In the future, the GSI FW image
	 * will provide optimized back-pressure values.
	 */
	gsihal_write_reg(GSI_IC_DISABLE_CHNL_BCK_PRS_LSB, 0xfffffffe);
	gsihal_write_reg(GSI_IC_DISABLE_CHNL_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_GEN_EVNT_BCK_PRS_LSB, 0xffffffbf);
	gsihal_write_reg(GSI_IC_GEN_EVNT_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_GEN_INT_BCK_PRS_LSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_GEN_INT_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_STOP_INT_MOD_BCK_PRS_LSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_STOP_INT_MOD_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_PROCESS_DESC_BCK_PRS_LSB, 0x00000000);
	gsihal_write_reg(GSI_IC_PROCESS_DESC_BCK_PRS_MSB, 0x00000000);
	gsihal_write_reg(GSI_IC_TLV_STOP_BCK_PRS_LSB, 0xf9ffffff);
	gsihal_write_reg(GSI_IC_TLV_STOP_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_TLV_RESET_BCK_PRS_LSB, 0xf9ffffff);
	gsihal_write_reg(GSI_IC_TLV_RESET_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_RGSTR_TIMER_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_RGSTR_TIMER_BCK_PRS_MSB, 0xfffffffe);
	gsihal_write_reg(GSI_IC_READ_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_READ_BCK_PRS_MSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_WRITE_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_WRITE_BCK_PRS_MSB, 0xffffdfff);
	gsihal_write_reg(GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB, 0xff03ffff);
}

int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	gsihal_write_reg(GSI_GSI_PERIPH_BASE_ADDR_MSB, 0);
	gsihal_write_reg(GSI_GSI_PERIPH_BASE_ADDR_LSB, per_base_addr);
	gsi_configure_bck_prs_matrix();
	gsi_configure_ieps(ver);

	return 0;
}
EXPORT_SYMBOL(gsi_configure_regs);
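/*
 * gsi_enable_fw() - enable the GSI block: run the MCS at double clock
 * frequency and program the version-dependent GSI_GSI_CFG fields.
 */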
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	struct gsihal_reg_gsi_cfg gsi_cfg;

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	/* Enable the MCS and set to x2 clocks */
	gsi_cfg.gsi_enable = 1;
	gsi_cfg.double_mcs_clk_freq = 1;
	gsi_cfg.uc_is_mcs = 0;
	gsi_cfg.gsi_pwr_clps = 0;
	gsi_cfg.bp_mtrix_disable = 0;
	if (ver >= GSI_VER_1_2) {
		gsihal_write_reg(GSI_GSI_MCS_CFG, 1);
		gsi_cfg.mcs_enable = 0;
	} else {
		gsi_cfg.mcs_enable = 1;
	}

	/* GSI frequency is peripheral frequency divided by 3 (2+1) */
	if (ver >= GSI_VER_2_5)
		gsi_cfg.sleep_clk_div = 2;
	gsihal_write_reg_fields(GSI_GSI_CFG, &gsi_cfg);

	return 0;
}
EXPORT_SYMBOL(gsi_enable_fw);

void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
		unsigned long *size, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (size)
		*size = gsihal_get_inst_ram_size();
	if (base_offset)
		*base_offset = gsihal_get_reg_n_ofst(GSI_GSI_INST_RAM_n, 0);
}
EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
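/*
 * gsi_halt_channel_ee() - send the generic HALT_CHANNEL command on behalf
 * of another EE and return the firmware return code through *code. The
 * GP_INT1 global interrupt is enabled around the command and the result
 * is read back from EE scratch word 0.
 */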
  3821. int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
  3822. {
  3823. enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
  3824. struct gsihal_reg_gsi_ee_generic_cmd cmd;
  3825. int res;
  3826. if (!gsi_ctx) {
  3827. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  3828. return -GSI_STATUS_NODEV;
  3829. }
  3830. if (chan_idx >= gsi_ctx->max_ch || !code) {
  3831. GSIERR("bad params chan_idx=%d\n", chan_idx);
  3832. return -GSI_STATUS_INVALID_PARAMS;
  3833. }
  3834. mutex_lock(&gsi_ctx->mlock);
  3835. __gsi_config_glob_irq(gsi_ctx->per.ee,
  3836. gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
  3837. reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
  3838. /* invalidate the response */
  3839. gsi_ctx->scratch.word0.val = gsihal_read_reg_n(
  3840. GSI_EE_n_CNTXT_SCRATCH_0, gsi_ctx->per.ee);
  3841. gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
  3842. gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
  3843. gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);
  3844. gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
  3845. cmd.opcode = op;
  3846. cmd.virt_chan_idx = chan_idx;
  3847. cmd.ee = ee;
  3848. gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD, gsi_ctx->per.ee, &cmd);
  3849. res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
  3850. msecs_to_jiffies(GSI_CMD_TIMEOUT));
  3851. if (res == 0) {
  3852. GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
  3853. res = -GSI_STATUS_TIMED_OUT;
  3854. goto free_lock;
  3855. }
  3856. gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
  3857. gsi_ctx->per.ee);
  3858. if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
  3859. GSI_GEN_EE_CMD_RETURN_CODE_RETRY) {
  3860. GSIDBG("chan_idx=%u ee=%u busy try again\n", chan_idx, ee);
  3861. *code = GSI_GEN_EE_CMD_RETURN_CODE_RETRY;
  3862. res = -GSI_STATUS_AGAIN;
  3863. goto free_lock;
  3864. }
  3865. if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
  3866. GSIERR("No response received\n");
  3867. res = -GSI_STATUS_ERROR;
  3868. goto free_lock;
  3869. }
  3870. res = GSI_STATUS_SUCCESS;
  3871. *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
  3872. free_lock:
  3873. __gsi_config_glob_irq(gsi_ctx->per.ee,
  3874. gsihal_get_glob_irq_en_gp_int1_mask(), 0);
  3875. mutex_unlock(&gsi_ctx->mlock);
  3876. return res;
  3877. }
  3878. EXPORT_SYMBOL(gsi_halt_channel_ee);
int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL;
	struct gsi_chan_ctx *ctx;
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	int res;

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%u\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (ee == 0)
		return gsi_alloc_ap_channel(chan_idx);

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	gsihal_write_reg_n_fields(
		GSI_EE_n_GSI_EE_GENERIC_CMD, gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) {
		GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	}

	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	if (ee == 0) {
		ctx = &gsi_ctx->chan[chan_idx];
		gsi_ctx->ch_dbg[chan_idx].ch_allocate++;
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_alloc_channel_ee);
int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
	int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%u\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	gsihal_write_reg_n_fields(
		GSI_EE_n_GSI_EE_GENERIC_CMD, gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
		GSIDBG("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE ||
		gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX) {
		GSIERR("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		GSI_ASSERT();
	}

	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	/* read back the current channel state */
	gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_idx, &ch_k_cntxt_0);
	curr_state = ch_k_cntxt_0.chstate;
	if (curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
		GSIDBG("ch %u state updated to %u\n", chan_idx, curr_state);
		res = GSI_STATUS_SUCCESS;
	} else {
		GSIERR("ch %u updated to incorrect state %u\n",
			chan_idx, curr_state);
		res = -GSI_STATUS_ERROR;
	}

	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_enable_flow_control_ee);
int gsi_flow_control_ee(unsigned int chan_idx, unsigned int ee,
	bool enable, bool prmy_scnd_fc, int *code)
{
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	enum gsi_generic_ee_cmd_opcode op = enable ?
		GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL :
		GSI_GEN_EE_CMD_DISABLE_FLOW_CHANNEL;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%u\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	cmd.prmy_scnd_fc = prmy_scnd_fc;
	gsihal_write_reg_n_fields(
		GSI_EE_n_GSI_EE_GENERIC_CMD, gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		GSI_ASSERT();
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
		GSIDBG("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE) {
		GSIERR("chan_idx=%u ee=%u incorrect channel type\n",
			chan_idx, ee);
		GSI_ASSERT();
	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX) {
		GSIERR("Channel ID = %u ee = %u not allocated\n", chan_idx, ee);
	}

	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
	res = GSI_STATUS_SUCCESS;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_flow_control_ee);
int gsi_query_flow_control_state_ee(unsigned int chan_idx, unsigned int ee,
	bool prmy_scnd_fc, int *code)
{
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_QUERY_FLOW_CHANNEL;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%u\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	cmd.prmy_scnd_fc = prmy_scnd_fc;
	gsihal_write_reg_n_fields(
		GSI_EE_n_GSI_EE_GENERIC_CMD, gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_val;

	if (prmy_scnd_fc)
		res = (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_val ==
			GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_SECONDARY) ?
			GSI_STATUS_SUCCESS : -GSI_STATUS_ERROR;
	else
		res = (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_val ==
			GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_PRIMARY) ?
			GSI_STATUS_SUCCESS : -GSI_STATUS_ERROR;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_query_flow_control_state_ee);
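
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller could
 * pair gsi_flow_control_ee() with gsi_query_flow_control_state_ee() to
 * verify that the channel really landed in the expected (primary or
 * secondary) flow-control state. prmy_scnd_fc is passed through unchanged
 * so the query checks the same state the command requested.
 *
 *	static int set_and_verify_fc(unsigned int chan_idx, unsigned int ee,
 *		bool enable, bool prmy_scnd_fc)
 *	{
 *		int code = 0;
 *		int res;
 *
 *		res = gsi_flow_control_ee(chan_idx, ee, enable,
 *			prmy_scnd_fc, &code);
 *		if (res != GSI_STATUS_SUCCESS)
 *			return res;
 *
 *		return gsi_query_flow_control_state_ee(chan_idx, ee,
 *			prmy_scnd_fc, &code);
 *	}
 */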
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	gsihal_write_reg_nk(GSI_MAP_EE_n_CH_k_VP_TABLE,
		ee, chan_num, per_ep_index);

	return 0;
}
EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep);
void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
	uint32_t db_addr_low, uint32_t db_addr_high)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_10,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_low);
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_11,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_high);
	} else {
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_12,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_low);
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_13,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_high);
	}
}
EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp)
{
	if (is_rp) {
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
			gsi_ctx->per.ee, chan_hdl);
	} else {
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
			gsi_ctx->per.ee, chan_hdl);
	}
}
EXPORT_SYMBOL(gsi_get_refetch_reg);
int gsi_get_drop_stats(unsigned long ep_id, int scratch_id)
{
	/* RTK uses scratch 5 */
	if (scratch_id == 5) {
		/*
		 * Each channel context is 6 lines of 8 bytes, but n in
		 * SHRAM_n counts 4-byte offsets, so multiplying ep_id by
		 * 6 * 2 = 12 gives the start of the required channel
		 * context. The layout then holds the ring rbase (8 bytes)
		 * plus channel scratch 0-4 (20 bytes), so adding another
		 * 28 / 4 = 7 lands on scratch 5 of the required channel.
		 */
		return gsihal_read_reg_n(GSI_GSI_SHRAM_n, ep_id * 12 + 7);
	}

	return 0;
}
EXPORT_SYMBOL(gsi_get_drop_stats);
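
/*
 * Worked example of the SHRAM_n offset arithmetic above (illustrative
 * only): for ep_id == 3, the channel context starts at SHRAM line
 * 3 * 12 = 36; skipping the rbase (8 bytes = 2 four-byte lines) and
 * scratch 0-4 (20 bytes = 5 lines) adds 7, so scratch 5 of that channel
 * is read from SHRAM_n with n = 36 + 7 = 43.
 */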
void gsi_wdi3_dump_register(unsigned long chan_hdl)
{
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	GSIDBG("reg dump ch id %lu\n", chan_hdl);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_0 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_1 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_2 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_3 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_4 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_5 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_6 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_7 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_QOS,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_QOS 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_0 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_1 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_2 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_3 0x%x\n", val);
}
EXPORT_SYMBOL(gsi_wdi3_dump_register);
int gsi_query_msi_addr(unsigned long chan_hdl, phys_addr_t *addr)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*addr = (phys_addr_t)(gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_CNTXT_8,
			gsi_ctx->per.ee, chan_hdl));

	return 0;
}
EXPORT_SYMBOL(gsi_query_msi_addr);
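
/*
 * Illustrative sketch (not part of the driver): gsi_query_msi_addr()
 * hands back the physical address of the channel's CNTXT_8 register; a
 * hypothetical peripheral driver could program that address as an MSI
 * target so interrupt writes land directly in the channel context. The
 * snippet below only shows how a caller might retrieve and log it.
 *
 *	phys_addr_t msi_addr;
 *
 *	if (!gsi_query_msi_addr(chan_hdl, &msi_addr))
 *		pr_debug("MSI addr for ch %lu: %pa\n", chan_hdl, &msi_addr);
 */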
static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr)
{
	union __packed gsi_channel_scratch scr;

	/*
	 * The sequence below is not atomic. The assumption is that the
	 * sequencer-specific fields remain unchanged while it runs.
	 */

	/* READ */
	scr.data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word2 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word3 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word4 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);

	/* UPDATE */
	scr.mhi.polling_mode = mscr.polling_mode;
	if (gsi_ctx->per.ver < GSI_VER_2_5) {
		scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre;
		scr.mhi.outstanding_threshold = mscr.outstanding_threshold;
	}

	/* WRITE */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl, scr.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl, scr.data.word2);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, scr.data.word3);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, scr.data.word4);

	return scr;
}
/**
 * gsi_get_hw_profiling_stats() - Query GSI HW profiling stats
 * @stats: [out] stats blob from client populated by driver
 *
 * Returns: 0 on success, negative on failure
 */
int gsi_get_hw_profiling_stats(struct gsi_hw_profiling_data *stats)
{
	if (stats == NULL) {
		GSIERR("bad params: stats == NULL\n");
		return -EINVAL;
	}

	stats->bp_cnt = (u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_BP_CNT_LSB) +
		((u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_BP_CNT_MSB) << 32);
	stats->bp_and_pending_cnt = (u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_BP_AND_PENDING_CNT_LSB) +
		((u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_BP_AND_PENDING_CNT_MSB) << 32);
	stats->mcs_busy_cnt = (u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_MCS_BUSY_CNT_LSB) +
		((u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_MCS_BUSY_CNT_MSB) << 32);
	stats->mcs_idle_cnt = (u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_MCS_IDLE_CNT_LSB) +
		((u64)gsihal_read_reg(
			GSI_GSI_MCS_PROFILING_MCS_IDLE_CNT_MSB) << 32);

	return 0;
}
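
/*
 * Illustrative sketch (not part of the driver): a hypothetical debugfs or
 * tracing consumer could derive MCS utilization from two snapshots of the
 * profiling counters. The delta arithmetic below is an assumption about
 * how a client might use the data, not an existing driver interface.
 *
 *	struct gsi_hw_profiling_data a, b;
 *	u64 busy, idle;
 *
 *	gsi_get_hw_profiling_stats(&a);
 *	// ... workload runs ...
 *	gsi_get_hw_profiling_stats(&b);
 *	busy = b.mcs_busy_cnt - a.mcs_busy_cnt;
 *	idle = b.mcs_idle_cnt - a.mcs_idle_cnt;
 *	if (busy + idle)
 *		pr_debug("mcs busy: %llu%%\n",
 *			div64_u64(100 * busy, busy + idle));
 */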
/**
 * gsi_get_fw_version() - Query GSI FW version
 * @ver: [out] version blob from client populated by driver
 *
 * Returns: 0 on success, negative on failure
 */
int gsi_get_fw_version(struct gsi_fw_version *ver)
{
	u32 raw = 0;

	if (ver == NULL) {
		GSIERR("bad params: ver == NULL\n");
		return -EINVAL;
	}

	if (gsi_ctx->per.ver < GSI_VER_3_0)
		raw = gsihal_read_reg_n(GSI_GSI_INST_RAM_n,
			GSI_INST_RAM_FW_VER_OFFSET);
	else
		raw = gsihal_read_reg_n(GSI_GSI_INST_RAM_n,
			GSI_INST_RAM_FW_VER_GSI_3_0_OFFSET);

	ver->hw = (raw & GSI_INST_RAM_FW_VER_HW_MASK) >>
		GSI_INST_RAM_FW_VER_HW_SHIFT;
	ver->flavor = (raw & GSI_INST_RAM_FW_VER_FLAVOR_MASK) >>
		GSI_INST_RAM_FW_VER_FLAVOR_SHIFT;
	ver->fw = (raw & GSI_INST_RAM_FW_VER_FW_MASK) >>
		GSI_INST_RAM_FW_VER_FW_SHIFT;

	return 0;
}
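
/*
 * Illustrative sketch (not part of the driver): reading and logging the
 * firmware version triple decoded above.
 *
 *	struct gsi_fw_version ver;
 *
 *	if (!gsi_get_fw_version(&ver))
 *		pr_info("GSI FW %u.%u.%u (hw.flavor.fw)\n",
 *			ver.hw, ver.flavor, ver.fw);
 */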
static int msm_gsi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pr_debug("gsi_probe\n");
	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
	if (!gsi_ctx) {
		dev_err(dev, "failed to allocate gsi context\n");
		return -ENOMEM;
	}

	gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
		"gsi", 0);
	if (gsi_ctx->ipc_logbuf == NULL)
		GSIERR("failed to create IPC log, continue...\n");

	gsi_ctx->dev = dev;
	init_completion(&gsi_ctx->gen_ee_cmd_compl);
	gsi_debugfs_init();

	return 0;
}
static struct platform_driver msm_gsi_driver = {
	.probe = msm_gsi_probe,
	.driver = {
		.name = "gsi",
		.of_match_table = msm_gsi_match,
	},
};

static struct platform_device *pdev;

/*
 * Module init.
 */
static int __init gsi_init(void)
{
	int ret;

	pr_debug("%s\n", __func__);
	ret = platform_driver_register(&msm_gsi_driver);
	if (ret < 0)
		goto out;

	if (running_emulation) {
		pdev = platform_device_register_simple("gsi", -1, NULL, 0);
		if (IS_ERR(pdev)) {
			ret = PTR_ERR(pdev);
			platform_driver_unregister(&msm_gsi_driver);
			goto out;
		}
	}

out:
	return ret;
}
arch_initcall(gsi_init);

/*
 * Module exit.
 */
static void __exit gsi_exit(void)
{
	if (running_emulation && pdev)
		platform_device_unregister(pdev);
	platform_driver_unregister(&msm_gsi_driver);
}
module_exit(gsi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Software Interface (GSI)");