gsi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
 */
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msm_gsi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_emulation.h"

#define GSI_CMD_TIMEOUT (5*HZ)
#define GSI_START_CMD_TIMEOUT_MS 1000
#define GSI_CMD_POLL_CNT 5
#define GSI_STOP_CMD_TIMEOUT_MS 200
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_STOP_CMD_POLL_CNT 4
#define GSI_STOP_IN_PROC_CMD_POLL_CNT 2
#define GSI_RESET_WA_MIN_SLEEP 1000
#define GSI_RESET_WA_MAX_SLEEP 2000
#define GSI_CHNL_STATE_MAX_RETRYCNT 10

#define GSI_STTS_REG_BITS 32

#ifndef CONFIG_DEBUG_FS
void gsi_debugfs_init(void)
{
}
#endif

static const struct of_device_id msm_gsi_match[] = {
	{ .compatible = "qcom,msm_gsi", },
	{ },
};

#if defined(CONFIG_IPA_EMULATION)
static bool running_emulation = true;
#else
static bool running_emulation;
#endif

struct gsi_ctx *gsi_ctx;

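/*
 * The __gsi_config_*() helpers below all follow one read-modify-write
 * pattern: register bits selected by @mask are forced to the matching bits
 * of @val, while all other bits are preserved. For example (hypothetical
 * calls), __gsi_config_ch_irq(ee, ~0, ~0) unmasks every channel-control
 * IRQ, and __gsi_config_ch_irq(ee, 1 << 3, 0) masks only channel 3.
 */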
static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	GSIDBG("current IEOB_IRQ_MSK: 0x%x, change to: 0x%x\n",
		curr, ((curr & ~mask) | (val & mask)));
}
static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
}

static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
}

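/*
 * gsi_channel_state_change_wait() - wait for a channel command to complete.
 * Waits up to @tm ms per iteration for the command-completion interrupt,
 * falling back to polling the channel state registers so that a START/STOP
 * command still resolves when the interrupt is delayed or lost.
 */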
static void gsi_channel_state_change_wait(unsigned long chan_hdl,
	struct gsi_chan_ctx *ctx,
	uint32_t tm, enum gsi_ch_cmd_opcode op)
{
	int poll_cnt;
	int gsi_pending_intr;
	int res;
	uint32_t type;
	uint32_t val;
	int ee = gsi_ctx->per.ee;
	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
	int stop_in_proc_retry = 0;
	int stop_retry = 0;

	/*
	 * Poll the GSI channel for a duration of tm * GSI_CMD_POLL_CNT.
	 * Polling the channel state improves debuggability of the GSI
	 * hardware state.
	 */
	for (poll_cnt = 0;
		poll_cnt < GSI_CMD_POLL_CNT;
		poll_cnt++) {
		res = wait_for_completion_timeout(&ctx->compl,
			msecs_to_jiffies(tm));

		/* Interrupt received, return */
		if (res != 0)
			return;

		type = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(gsi_ctx->per.ee));

		gsi_pending_intr = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));

		/* Update the channel state only if an interrupt was raised
		 * on this particular channel and the global interrupt for
		 * channel control is also raised.
		 */
		if ((type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK) &&
				((gsi_pending_intr >> chan_hdl) & 1)) {
			/*
			 * Check the channel state here in case the channel is
			 * already started but the interrupt has not yet been
			 * received.
			 */
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
					gsi_ctx->per.ee));
			curr_state = (val &
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
		}

		if (op == GSI_CH_START) {
			if (curr_state == GSI_CHAN_STATE_STARTED) {
				ctx->state = curr_state;
				return;
			}
		}

		if (op == GSI_CH_STOP) {
			if (curr_state == GSI_CHAN_STATE_STOPPED)
				stop_retry++;
			else if (curr_state == GSI_CHAN_STATE_STOP_IN_PROC)
				stop_in_proc_retry++;
		}

		/* If a stop interrupt was latched in the status register,
		 * restart the poll count so the loop can run until the
		 * STOPPED or STOP_IN_PROC retry limits are reached.
		 */
		if (stop_retry == 1 || stop_in_proc_retry == 1)
			poll_cnt = 0;

		/* If the stop retry limit was reached and the channel is
		 * already stopped, clear the pending interrupt.
		 */
		if (stop_retry == GSI_STOP_CMD_POLL_CNT) {
			gsi_writel(gsi_pending_intr, gsi_ctx->base +
				GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
			ctx->state = curr_state;
			return;
		}

		/* If the channel stop is still in progress, there is no need
		 * to keep waiting.
		 */
		if (stop_in_proc_retry == GSI_STOP_IN_PROC_CMD_POLL_CNT) {
			ctx->state = curr_state;
			return;
		}

		GSIDBG("GSI wait on chan_hdl=%lu irqtyp=%u state=%u intr=%u\n",
			chan_hdl,
			type,
			ctx->state,
			gsi_pending_intr);
	}

	GSIDBG("invalidating the channel state when timeout happens\n");
	ctx->state = curr_state;
}

static void gsi_handle_ch_ctrl(int ee)
{
	uint32_t ch;
	int i;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
	GSIDBG("ch %x\n", ch);
	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch) {
			if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
				GSIERR("invalid channel %d\n", i);
				break;
			}

			ctx = &gsi_ctx->chan[i];
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
			ctx->state = (val &
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("ch %u state updated to %u\n", i, ctx->state);
			complete(&ctx->compl);
			gsi_ctx->ch_dbg[i].cmd_completed++;
		}
	}
}

static void gsi_handle_ev_ctrl(int ee)
{
	uint32_t ch;
	int i;
	uint32_t val;
	struct gsi_evt_ctx *ctx;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee));
	GSIDBG("ev %x\n", ch);
	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch) {
			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
				GSIERR("invalid event %d\n", i);
				break;
			}

			ctx = &gsi_ctx->evtr[i];
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
			ctx->state = (val &
				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("evt %u state updated to %u\n", i, ctx->state);
			complete(&ctx->compl);
		}
	}
}

static void gsi_handle_glob_err(uint32_t err)
{
	struct gsi_log_err *log;
	struct gsi_chan_ctx *ch;
	struct gsi_evt_ctx *ev;
	struct gsi_chan_err_notify chan_notify;
	struct gsi_evt_err_notify evt_notify;
	struct gsi_per_notify per_notify;
	uint32_t val;
	enum gsi_err_type err_type;

	log = (struct gsi_log_err *)&err;
	GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
		log->virt_idx);
	GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
		log->arg2, log->arg3);

	err_type = log->err_type;
	/*
	 * These are errors thrown by hardware. We need
	 * BUG_ON() to capture the hardware state right
	 * when it is unexpected.
	 */
	switch (err_type) {
	case GSI_ERR_TYPE_GLOB:
		per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
		per_notify.user_data = gsi_ctx->per.user_data;
		per_notify.data.err_desc = err & 0xFFFF;
		gsi_ctx->per.notify_cb(&per_notify);
		break;
	case GSI_ERR_TYPE_CHAN:
		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) {
			GSIERR("Unexpected ch %d\n", log->virt_idx);
			return;
		}

		ch = &gsi_ctx->chan[log->virt_idx];
		chan_notify.chan_user_data = ch->props.chan_user_data;
		chan_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_INVALID_TRE_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}

			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx,
					gsi_ctx->per.ee));
			ch->state = (val &
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("ch %u state updated to %u\n", log->virt_idx,
				ch->state);

			ch->stats.invalid_tre_error++;
			if (ch->state == GSI_CHAN_STATE_ERROR) {
				GSIERR("Unexpected channel state %d\n",
					ch->state);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
		} else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
			complete(&ch->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			chan_notify.evt_id =
				GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id =
				GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
		} else if (log->code == GSI_HWO_1_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
		} else {
			GSIERR("unexpected event log code %d\n", log->code);
			GSI_ASSERT();
		}
		ch->props.err_cb(&chan_notify);
		break;
	case GSI_ERR_TYPE_EVT:
		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) {
			GSIERR("Unexpected ev %d\n", log->virt_idx);
			return;
		}

		ev = &gsi_ctx->evtr[log->virt_idx];
		evt_notify.user_data = ev->props.user_data;
		evt_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
			complete(&ev->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
		} else {
			GSIERR("unexpected event log code %d\n", log->code);
			GSI_ASSERT();
		}
		ev->props.err_cb(&evt_notify);
		break;
	}
}

static void gsi_handle_gp_int1(void)
{
	complete(&gsi_ctx->gen_ee_cmd_compl);
}

static void gsi_handle_glob_ee(int ee)
{
	uint32_t val;
	uint32_t err;
	struct gsi_per_notify notify;
	uint32_t clr = ~0;

	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee));

	notify.user_data = gsi_ctx->per.user_data;

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
		err = gsi_readl(gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_OFFS(ee));
		if (gsi_ctx->per.ver >= GSI_VER_1_2)
			gsi_writel(0, gsi_ctx->base +
				GSI_EE_n_ERROR_LOG_OFFS(ee));
		gsi_writel(clr, gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
		gsi_handle_glob_err(err);
	}

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK)
		gsi_handle_gp_int1();

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP2;
		gsi_ctx->per.notify_cb(&notify);
	}

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP3;
		gsi_ctx->per.notify_cb(&notify);
	}

	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee));
}

static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
{
	ctx->wp_local += ctx->elem_sz;
	if (ctx->wp_local == ctx->end)
		ctx->wp_local = ctx->base;
}

static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
{
	ctx->rp_local += ctx->elem_sz;
	if (ctx->rp_local == ctx->end)
		ctx->rp_local = ctx->base;
}

uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
{
	WARN_ON(addr < ctx->base || addr >= ctx->end);
	return (uint32_t)(addr - ctx->base) / ctx->elem_sz;
}

static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
	uint64_t addr2)
{
	uint32_t addr_diff;

	GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
		ctx->base, ctx->end);

	if (addr1 < ctx->base || addr1 >= ctx->end) {
		GSIERR("address = 0x%llx not in range\n", addr1);
		GSI_ASSERT();
	}

	if (addr2 < ctx->base || addr2 >= ctx->end) {
		GSIERR("address = 0x%llx not in range\n", addr2);
		GSI_ASSERT();
	}

	addr_diff = (uint32_t)(addr2 - addr1);
	if (addr1 < addr2)
		return addr_diff / ctx->elem_sz;
	else
		return (addr_diff + ctx->len) / ctx->elem_sz;
}

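/*
 * Worked example for gsi_get_complete_num() (hypothetical values): with
 * elem_sz = 0x10, len = 0x100, base = 0x1000 and end = 0x1100, take
 * addr1 = 0x10F0 and addr2 = 0x1010, i.e. the ring has wrapped. Then
 * addr_diff = (uint32_t)(addr2 - addr1) wraps to 0xFFFFFF20, and
 * (addr_diff + len) wraps again to 0x20, giving 0x20 / 0x10 = 2 completed
 * elements: one up to the end of the ring plus one after the wrap.
 */
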
static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
		struct gsi_chan_xfer_notify *notify, bool callback)
{
	uint32_t ch_id;
	struct gsi_chan_ctx *ch_ctx;
	uint16_t rp_idx;
	uint64_t rp;

	ch_id = evt->chid;
	if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
		GSIERR("Unexpected ch %d\n", ch_id);
		return;
	}

	ch_ctx = &gsi_ctx->chan[ch_id];
	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
		return;

	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
		rp = evt->xfer_ptr;

		if (ch_ctx->ring.rp_local != rp) {
			ch_ctx->stats.completed +=
				gsi_get_complete_num(&ch_ctx->ring,
					ch_ctx->ring.rp_local, rp);
			ch_ctx->ring.rp_local = rp;
		}

		/* the element at RP is also processed */
		gsi_incr_ring_rp(&ch_ctx->ring);

		ch_ctx->ring.rp = ch_ctx->ring.rp_local;
		rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
		notify->veid = GSI_VEID_DEFAULT;
	} else {
		rp_idx = evt->cookie;
		notify->veid = evt->veid;
	}

	ch_ctx->stats.completed++;
	WARN_ON(!ch_ctx->user_data[rp_idx].valid);
	notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;
	ch_ctx->user_data[rp_idx].valid = false;
	notify->chan_user_data = ch_ctx->props.chan_user_data;
	notify->evt_id = evt->code;
	notify->bytes_xfered = evt->len;

	if (callback) {
		if (atomic_read(&ch_ctx->poll_mode)) {
			GSIERR("Calling client callback in polling mode\n");
			WARN_ON(1);
		}
		ch_ctx->props.xfer_cb(notify);
	}
}

static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
		struct gsi_chan_xfer_notify *notify, bool callback)
{
	struct gsi_xfer_compl_evt *evt;

	evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
		ctx->ring.rp_local - ctx->ring.base);
	gsi_process_chan(evt, notify, callback);

	gsi_incr_ring_rp(&ctx->ring);

	/* recycle this element */
	gsi_incr_ring_wp(&ctx->ring);
	ctx->stats.completed++;
}

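/*
 * Note that gsi_process_evt_re() advances RP and WP in lockstep: each
 * consumed event element is immediately recycled to the hardware, and the
 * doorbell write below publishes the new WP.
 */
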
static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
	uint32_t val;

	ctx->ring.wp = ctx->ring.wp_local;
	val = (ctx->ring.wp_local &
		GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id,
			gsi_ctx->per.ee));
}

static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
{
	uint32_t val;

	/*
	 * Allocate new events for this channel first
	 * before submitting the new TREs.
	 * For TO_GSI channels the event ring doorbell is rung as part of
	 * interrupt handling.
	 */
	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		gsi_ring_evt_doorbell(ctx->evtr);
	ctx->ring.wp = ctx->ring.wp_local;

	val = (ctx->ring.wp_local &
		GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id,
			gsi_ctx->per.ee));
}

static void gsi_handle_ieob(int ee)
{
	uint32_t ch;
	int i;
	uint64_t rp;
	struct gsi_evt_ctx *ctx;
	struct gsi_chan_xfer_notify notify;
	unsigned long flags;
	unsigned long cntr;
	uint32_t msk;
	bool empty;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee));
	msk = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	gsi_writel(ch & msk, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));

	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch & msk) {
			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
				GSIERR("invalid event %d\n", i);
				break;
			}
			ctx = &gsi_ctx->evtr[i];

			/*
			 * Don't handle MSI interrupts, only handle IEOB
			 * IRQs
			 */
			if (ctx->props.intr == GSI_INTR_MSI)
				continue;

			if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
				GSIERR("Unexpected irq intf %d\n",
					ctx->props.intf);
				GSI_ASSERT();
			}
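			/*
			 * Drain the ring under the lock, then re-read RP:
			 * new events may have arrived while the callbacks
			 * ran. The loop exits only after a pass that
			 * processed nothing (cntr == 0).
			 */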
			spin_lock_irqsave(&ctx->ring.slock, flags);
check_again:
			cntr = 0;
			empty = true;
			rp = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;

			ctx->ring.rp = rp;
			while (ctx->ring.rp_local != rp) {
				++cntr;
				if (ctx->props.exclusive &&
					atomic_read(&ctx->chan->poll_mode)) {
					cntr = 0;
					break;
				}
				gsi_process_evt_re(ctx, &notify, true);
				empty = false;
			}
			if (!empty)
				gsi_ring_evt_doorbell(ctx);
			if (cntr != 0)
				goto check_again;
			spin_unlock_irqrestore(&ctx->ring.slock, flags);
		}
	}
}

static void gsi_handle_inter_ee_ch_ctrl(int ee)
{
	uint32_t ch;
	int i;

	ch = gsi_readl(gsi_ctx->base +
		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch) {
			/* not currently expected */
			GSIERR("ch %u was inter-EE changed\n", i);
		}
	}
}

static void gsi_handle_inter_ee_ev_ctrl(int ee)
{
	uint32_t ch;
	int i;

	ch = gsi_readl(gsi_ctx->base +
		GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee));
	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch) {
			/* not currently expected */
			GSIERR("evt %u was inter-EE changed\n", i);
		}
	}
}

static void gsi_handle_general(int ee)
{
	uint32_t val;
	struct gsi_per_notify notify;

	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee));

	notify.user_data = gsi_ctx->per.user_data;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;

	if (gsi_ctx->per.notify_cb)
		gsi_ctx->per.notify_cb(&notify);

	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee));
}

#define GSI_ISR_MAX_ITER 50

static void gsi_handle_irq(void)
{
	uint32_t type;
	int ee = gsi_ctx->per.ee;
	unsigned long cnt = 0;

	while (1) {
		type = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee));

		if (!type)
			break;

		GSIDBG_LOW("type 0x%x\n", type);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
			gsi_handle_ch_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK)
			gsi_handle_ev_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK)
			gsi_handle_glob_ee(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
			gsi_handle_ieob(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK)
			gsi_handle_inter_ee_ch_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK)
			gsi_handle_inter_ee_ev_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
			gsi_handle_general(ee);

		if (++cnt > GSI_ISR_MAX_ITER) {
			/*
			 * Exceeded the maximum number of spurious interrupts
			 * from hardware; unexpected hardware state.
			 */
			GSIERR("Too many spurious interrupts from GSI HW\n");
			GSI_ASSERT();
		}
	}
}

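/*
 * Top-level ISR. When the client registered a clock-request callback, GSI
 * clocks may be off at interrupt time: the IRQ is handled here only if the
 * grant is immediate; otherwise the client completes the grant later via
 * gsi_complete_clk_grant(), which runs gsi_handle_irq() itself.
 */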
static irqreturn_t gsi_isr(int irq, void *ctxt)
{
	if (gsi_ctx->per.req_clk_cb) {
		bool granted = false;

		gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
		if (granted) {
			gsi_handle_irq();
			gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
		}
	} else {
		if (!gsi_ctx->per.clk_status_cb())
			return IRQ_HANDLED;
		gsi_handle_irq();
	}

	return IRQ_HANDLED;
}

static uint32_t gsi_get_max_channels(enum gsi_ver ver)
{
	uint32_t reg = 0;

	switch (ver) {
	case GSI_VER_ERR:
	case GSI_VER_MAX:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		break;
	case GSI_VER_1_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
		break;
	case GSI_VER_1_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
		break;
	case GSI_VER_1_3:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_5:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_7:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_9:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	}

	GSIDBG("max channels %d\n", reg);

	return reg;
}

static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
{
	uint32_t reg = 0;

	switch (ver) {
	case GSI_VER_ERR:
	case GSI_VER_MAX:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		break;
	case GSI_VER_1_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
		break;
	case GSI_VER_1_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
		break;
	case GSI_VER_1_3:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_5:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_7:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_9:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	}

	GSIDBG("max event rings %d\n", reg);

	return reg;
}

int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	gsi_handle_irq();
	gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_complete_clk_grant);

int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	gsi_ctx->base = devm_ioremap_nocache(
		gsi_ctx->dev, gsi_base_addr, gsi_size);

	if (!gsi_ctx->base) {
		GSIERR("failed to map access to GSI HW\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%x)\n",
		&gsi_base_addr,
		gsi_ctx->base,
		gsi_size);

	return 0;
}
EXPORT_SYMBOL(gsi_map_base);

int gsi_unmap_base(void)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
	gsi_ctx->base = NULL;

	return 0;
}
EXPORT_SYMBOL(gsi_unmap_base);

int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
{
	int res;
	uint32_t val;
	int needed_reg_ver;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !dev_hdl) {
		GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
		GSIERR("bad params gsi_ver=%d\n", props->ver);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->notify_cb) {
		GSIERR("notify callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->req_clk_cb && !props->rel_clk_cb) {
		GSIERR("rel callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->per_registered) {
		GSIERR("per already registered\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	switch (props->ver) {
	case GSI_VER_1_0:
	case GSI_VER_1_2:
	case GSI_VER_1_3:
	case GSI_VER_2_0:
	case GSI_VER_2_2:
		needed_reg_ver = GSI_REGISTER_VER_1;
		break;
	case GSI_VER_2_5:
	case GSI_VER_2_7:
	case GSI_VER_2_9:
		needed_reg_ver = GSI_REGISTER_VER_2;
		break;
	case GSI_VER_ERR:
	case GSI_VER_MAX:
	default:
		GSIERR("GSI version is not supported %d\n", props->ver);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (needed_reg_ver != GSI_REGISTER_VER_CURRENT) {
		GSIERR("Invalid register version. current=%d, needed=%d\n",
			GSI_REGISTER_VER_CURRENT, needed_reg_ver);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	GSIDBG("gsi ver %d register ver %d needed register ver %d\n",
		props->ver, GSI_REGISTER_VER_CURRENT, needed_reg_ver);

	spin_lock_init(&gsi_ctx->slock);
	if (props->intr == GSI_INTR_IRQ) {
		if (!props->irq) {
			GSIERR("bad irq specified %u\n", props->irq);
			return -GSI_STATUS_INVALID_PARAMS;
		}

		/*
		 * On a real UE, there are two separate interrupt
		 * vectors that get directed toward the GSI/IPA
		 * drivers. They are handled by gsi_isr() and
		 * (ipa_isr() or ipa3_isr()) respectively. In the
		 * emulation environment, this is not the case;
		 * instead, interrupt vectors are routed to the
		 * emulation hardware's interrupt controller, which
		 * in turn forwards a single interrupt to the GSI/IPA
		 * driver. When the new interrupt vector is received,
		 * the driver needs to probe the interrupt
		 * controller's registers to see if one, the other, or
		 * both interrupts have occurred. Given the above, we
		 * now need to handle both situations, namely: the
		 * emulator's and the real UE's.
		 */
		if (running_emulation) {
			/*
			 * New scheme involving the emulator's
			 * interrupt controller.
			 */
			res = devm_request_threaded_irq(
				gsi_ctx->dev,
				props->irq,
				/* top half handler to follow */
				emulator_hard_irq_isr,
				/* threaded bottom half handler to follow */
				emulator_soft_irq_isr,
				IRQF_SHARED,
				"emulator_intcntrlr",
				gsi_ctx);
		} else {
			/*
			 * Traditional scheme used on the real UE.
			 */
			res = devm_request_irq(gsi_ctx->dev, props->irq,
				gsi_isr,
				props->req_clk_cb ? IRQF_TRIGGER_RISING :
					IRQF_TRIGGER_HIGH,
				"gsi",
				gsi_ctx);
		}
		if (res) {
			GSIERR("failed to register isr for %u\n",
				props->irq);
			return -GSI_STATUS_ERROR;
		}
		GSIDBG("succeeded to register isr for %u\n",
			props->irq);

		res = enable_irq_wake(props->irq);
		if (res)
			GSIERR("failed to enable wake irq %u\n", props->irq);
		else
			GSIDBG("GSI irq is wake enabled %u\n", props->irq);
	} else {
		GSIERR("do not support interrupt type %u\n", props->intr);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/*
	 * If base not previously mapped via gsi_map_base(), map it
	 * now...
	 */
	if (!gsi_ctx->base) {
		res = gsi_map_base(props->phys_addr, props->size);
		if (res)
			return res;
	}

	if (running_emulation) {
		GSIDBG("GSI SW ver register value 0x%x\n",
			gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_SW_VERSION_OFFS(0)));
		gsi_ctx->intcntrlr_mem_size =
			props->emulator_intcntrlr_size;
		gsi_ctx->intcntrlr_base =
			devm_ioremap_nocache(
				gsi_ctx->dev,
				props->emulator_intcntrlr_addr,
				props->emulator_intcntrlr_size);
		if (!gsi_ctx->intcntrlr_base) {
			GSIERR("failed to remap emulator's interrupt controller HW\n");
			gsi_unmap_base();
			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		GSIDBG("Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
			&(props->emulator_intcntrlr_addr),
			gsi_ctx->intcntrlr_base,
			props->emulator_intcntrlr_size);
		gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
		gsi_ctx->intcntrlr_client_isr =
			props->emulator_intcntrlr_client_isr;
	}

	gsi_ctx->per = *props;
	gsi_ctx->per_registered = true;
	mutex_init(&gsi_ctx->mlock);
	atomic_set(&gsi_ctx->num_chan, 0);
	atomic_set(&gsi_ctx->num_evt_ring, 0);
	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
	if (gsi_ctx->max_ch == 0) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max channels\n");
		return -GSI_STATUS_ERROR;
	}
	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
	if (gsi_ctx->max_ev == 0) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max event rings\n");
		return -GSI_STATUS_ERROR;
	}

	if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
		GSIERR("max event rings are beyond absolute maximum\n");
		return -GSI_STATUS_ERROR;
	}

	if (props->mhi_er_id_limits_valid &&
		props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("MHI event ring start id %u is beyond max %u\n",
			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
		return -GSI_STATUS_ERROR;
	}

	gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);

	/* exclude reserved mhi events */
	if (props->mhi_er_id_limits_valid)
		gsi_ctx->evt_bmap |=
			((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^
			((1 << (props->mhi_er_id_limits[0])) - 1);
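
	/*
	 * Worked example (hypothetical values): with max_ev = 16 the initial
	 * evt_bmap is 0xFFFF0000, i.e. rings 0-15 free. With MHI limits
	 * {10, 11}, ((1 << 12) - 1) ^ ((1 << 10) - 1) = 0x0C00, so rings 10
	 * and 11 are marked used and stay reserved for MHI.
	 */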

	/*
	 * Enable all interrupts except GSI_BREAK_POINT.
	 * Inter-EE commands/interrupts are not supported.
	 */
	__gsi_config_type_irq(props->ee, ~0, ~0);
	__gsi_config_ch_irq(props->ee, ~0, ~0);
	__gsi_config_evt_irq(props->ee, ~0, ~0);
	__gsi_config_ieob_irq(props->ee, ~0, ~0);
	__gsi_config_glob_irq(props->ee, ~0, ~0);
	__gsi_config_gen_irq(props->ee, ~0,
		~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK);

	gsi_writel(props->intr, gsi_ctx->base +
		GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));

	/* set GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB/MSB to 0 */
	if ((gsi_ctx->per.ver >= GSI_VER_2_0) &&
		(props->intr != GSI_INTR_MSI)) {
		gsi_writel(0, gsi_ctx->base +
			GSI_EE_n_CNTXT_MSI_BASE_LSB(gsi_ctx->per.ee));
		gsi_writel(0, gsi_ctx->base +
			GSI_EE_n_CNTXT_MSI_BASE_MSB(gsi_ctx->per.ee));
	}

	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
	if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK)
		gsi_ctx->enabled = true;
	else
		GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");

	if (gsi_ctx->per.ver >= GSI_VER_1_2)
		gsi_writel(0, gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));

	if (running_emulation) {
		/*
		 * Set up the emulator's interrupt controller...
		 */
		res = setup_emulator_cntrlr(
			gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
		if (res != 0) {
			gsi_unmap_base();
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
			GSIERR("setup_emulator_cntrlr() failed\n");
			return res;
		}
	}

	*dev_hdl = (uintptr_t)gsi_ctx;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_register_device);

int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	unsigned int max_usb_pkt_size = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
				gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (val->max_usb_pkt_size_valid &&
	    val->max_usb_pkt_size != 1024 &&
	    val->max_usb_pkt_size != 512 &&
	    val->max_usb_pkt_size != 64) {
		GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
				val->max_usb_pkt_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	if (val->mhi_base_chan_idx_valid)
		gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
			val->mhi_base_chan_idx;
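	/* Scratch encoding of max_usb_pkt_size: 64B -> 2, 512B -> 0, 1024B -> 1 */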
	if (val->max_usb_pkt_size_valid) {
		max_usb_pkt_size = 2;
		if (val->max_usb_pkt_size > 64)
			max_usb_pkt_size =
				(val->max_usb_pkt_size == 1024) ? 1 : 0;
		gsi_ctx->scratch.word0.s.max_usb_pkt_size = max_usb_pkt_size;
	}

	gsi_writel(gsi_ctx->scratch.word0.val,
			gsi_ctx->base +
			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_device_scratch);

int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
				gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!force && atomic_read(&gsi_ctx->num_chan)) {
		GSIERR("cannot deregister: %u channels are still connected\n",
				atomic_read(&gsi_ctx->num_chan));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
		GSIERR("cannot deregister: %u event rings are still connected\n",
				atomic_read(&gsi_ctx->num_evt_ring));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* disable all interrupts */
	__gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);

	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
	gsi_unmap_base();
	memset(gsi_ctx, 0, sizeof(*gsi_ctx));

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_deregister_device);
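/*
 * Program the event ring context registers for event ring @evt_id.
 * Register mapping, as written below: CNTXT_0 holds the interface
 * type, interrupt type and element size; CNTXT_1 the ring length;
 * CNTXT_2/3 the ring base address LSB/MSB; CNTXT_8 the interrupt
 * moderation settings; CNTXT_9 the interrupt vector; CNTXT_10/11 the
 * MSI address LSB/MSB; CNTXT_12/13 the RP-update address LSB/MSB.
 */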
static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
		uint8_t evt_id, unsigned int ee)
{
	uint32_t val;

	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
			props->re_size);

	val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) |
		((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) |
		((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee));

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		val = (props->ring_len &
			GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK)
			<< GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
		gsi_writel(val, gsi_ctx->base +
			GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
	} else {
		val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK)
			<< GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
		gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
	}

	val = (props->ring_base_addr &
		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee));

	val = ((props->ring_base_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee));

	val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) |
		((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee));

	val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee));

	val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee));

	val = ((props->msi_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee));

	val = (props->rp_update_addr &
		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee));

	val = ((props->rp_update_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee));
}
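/*
 * Initialize the software view of an event ring. One ring element is
 * deliberately left unused (max_num_elem = len/elem_sz - 1),
 * presumably so that a completely full ring can be told apart from an
 * empty one when the read and write pointers are equal.
 */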
static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
}
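/*
 * Prime an event ring: zero the ring memory and hand all usable
 * elements to hardware by advancing the local write pointer to the
 * last element, then ringing the doorbell. Per the write-order rule
 * noted below, the doorbell MSB is written first and the LSB write
 * (apparently issued by gsi_ring_evt_doorbell()) follows.
 */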
static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;
	uint32_t val;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		ctx->ring.max_num_elem * ctx->ring.elem_sz;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id, gsi_ctx->per.ee));

	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static void gsi_prime_evt_ring_wdi(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	if (ctx->ring.base_va)
		memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		((ctx->ring.max_num_elem + 2) * ctx->ring.elem_sz);
	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
{
	uint64_t ra;

	if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
				props->ring_len % 4) ||
			(props->re_size == GSI_EVT_RING_RE_SIZE_8B &&
				props->ring_len % 8) ||
			(props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
				props->ring_len % 16)) {
		GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
				props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}
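	/*
	 * The ring base must be aligned to the ring length rounded up to
	 * a power of two; e.g. a 0xC00-byte ring must start on a 0x1000
	 * boundary. The do_div() round trip below checks this without
	 * needing a 64-bit modulo.
	 */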
	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
				props->ring_base_addr,
				roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
	    (!props->evchid_valid ||
	     props->evchid > gsi_ctx->per.mhi_er_id_limits[1] ||
	     props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) {
		GSIERR("MHI requires evchid valid=%d val=%u\n",
				props->evchid_valid, props->evchid);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
	    props->evchid_valid) {
		GSIERR("protocol %u cannot specify evchid\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}
/**
 * gsi_cleanup_xfer_user_data: clean up a channel's user data array using a
 * callback passed in by the IPA driver. This must be done in GSI because
 * only GSI knows which TREs are in use; IPA performs the actual cleanup,
 * so IPA passes a callback and GSI invokes it with parameters from GSI.
 *
 * @chan_hdl: handle of the GSI channel whose user data array is cleaned
 * @cleanup_cb: callback used to clean the user data array; takes 2 inputs:
 * @chan_user_data: ipa_sys_context of the gsi_channel
 * @xfer_user_data: user data array element (rx_pkt wrapper)
 *
 * Returns: 0 on success, negative on failure
 */
static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
		void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
{
	struct gsi_chan_ctx *ctx;
	uint64_t i;
	uint16_t rp_idx;

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* for coalescing, traverse the whole array */
	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		size_t user_data_size =
			ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
		for (i = 0; i < user_data_size; i++) {
			if (ctx->user_data[i].valid)
				cleanup_cb(ctx->props.chan_user_data,
						ctx->user_data[i].p);
		}
	} else {
		/* for non-coalescing, clean between RP and WP */
		while (ctx->ring.rp_local != ctx->ring.wp_local) {
			rp_idx = gsi_find_idx_from_addr(&ctx->ring,
					ctx->ring.rp_local);
			WARN_ON(!ctx->user_data[rp_idx].valid);
			cleanup_cb(ctx->props.chan_user_data,
					ctx->user_data[rp_idx].p);
			gsi_incr_ring_rp(&ctx->ring);
		}
	}

	return 0;
}

int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	unsigned long evt_id;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
	uint32_t val;
	struct gsi_evt_ctx *ctx;
	int res;
	int ee;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n",
				props, dev_hdl, evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_evt_ring_props(props)) {
		GSIERR("invalid params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}
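	/*
	 * Pick an event ring id: MHI clients pin a specific id via
	 * props->evchid; everyone else gets the first free bit in
	 * evt_bmap (ids beyond max_ev and the MHI-reserved range were
	 * pre-set at registration, so they are never handed out here).
	 */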
	if (!props->evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
				sizeof(unsigned long) * BITS_PER_BYTE);
		if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
			GSIERR("failed to alloc event ID\n");
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		set_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		evt_id = props->evchid;
	}
	GSIDBG("Using %lu as virt evt id\n", evt_id);

	ctx = &gsi_ctx->evtr[evt_id];
	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->chan_ref_cnt, 0);
	ctx->props = *props;

	mutex_lock(&gsi_ctx->mlock);
	val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	ee = gsi_ctx->per.ee;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_CMD_OFFS(ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_id);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu allocation failed state=%u\n",
				evt_id, ctx->state);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_evt_ring(props, &ctx->ring);

	ctx->id = evt_id;
	*evt_ring_hdl = evt_id;
	atomic_inc(&gsi_ctx->num_evt_ring);
	if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	else if (props->intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	gsi_writel(1 << evt_id, gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));

	/* enable IEOB interrupts for GPI interfaces and for MSI; mask otherwise */
	if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
	    (props->intr != GSI_INTR_MSI))
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
	else
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_evt_ring);
static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	gsi_writel(val.data.word1, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl,
			gsi_ctx->per.ee));
	gsi_writel(val.data.word2, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl,
			gsi_ctx->per.ee));
}

int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
				gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_evt_ring_scratch(evt_ring_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_evt_ring_scratch);

int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	uint32_t val;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev ||
	    evt_ring_hdl >= GSI_EVT_RING_MAX) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (atomic_read(&ctx->chan_ref_cnt)) {
		GSIERR("%d channels still using this event ring\n",
				atomic_read(&ctx->chan_ref_cnt));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
				ctx->state);
		/*
		 * Hardware did not move the ring to NOT_ALLOCATED, which
		 * is an unexpected hardware state.
		 */
		GSI_ASSERT();
	}
	mutex_unlock(&gsi_ctx->mlock);

	if (!ctx->props.evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	}
	atomic_dec(&gsi_ctx->num_evt_ring);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_evt_ring);

int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
				db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
				gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);

int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
				gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;
	gsi_ring_evt_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_evt_ring_db);

int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
{
	struct gsi_chan_ctx *ctx;
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_STARTED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;

	/* write MSB first */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
			gsi_ctx->per.ee));

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_ch_ring_db);

int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	uint32_t val;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
				ctx->state);
		/*
		 * Hardware returned "GSI ring not allocated", which is
		 * unexpected here and indicates hardware instability.
		 */
		GSI_ASSERT();
	}
	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
	gsi_init_evt_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);

	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	if (ctx->props.intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_evt_ring);

int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_evt_ring_cfg);

int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_evt_ring_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.exclusive != props->exclusive) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	mutex_unlock(&ctx->mlock);

	return gsi_reset_evt_ring(evt_ring_hdl);
}
EXPORT_SYMBOL(gsi_set_evt_ring_cfg);

static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props,
		unsigned int ee)
{
	uint32_t val;

	val = (((props->low_weight <<
			GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
			GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
		((props->max_prefetch <<
			GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
			GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
		((props->use_db_eng <<
			GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
			GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK));
	if (gsi_ctx->per.ver >= GSI_VER_2_0)
		val |= ((props->prefetch_mode <<
			GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT) &
			GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK);

	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
}

static void gsi_program_chan_ctx_qos_v2_5(struct gsi_chan_props *props,
		unsigned int ee)
{
	uint32_t val;

	val = (((props->low_weight <<
			GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
			GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
		((props->max_prefetch <<
			GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
			GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
		((props->use_db_eng <<
			GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
			GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) |
		((props->prefetch_mode <<
			GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) &
			GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) |
		((props->empty_lvl_threshold <<
			GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) &
			GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK));

	gsi_writel(val, gsi_ctx->base +
			GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
}

static void gsi_program_chan_ctx_qos_v2_9(struct gsi_chan_props *props,
		unsigned int ee)
{
	uint32_t val;

	val = (((props->low_weight <<
			GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
			GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
		((props->max_prefetch <<
			GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
			GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
		((props->use_db_eng <<
			GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
			GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) |
		((props->prefetch_mode <<
			GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) &
			GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) |
		((props->empty_lvl_threshold <<
			GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) &
			GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK) |
		((props->db_in_bytes <<
			GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_SHFT) &
			GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_BMSK));

	gsi_writel(val, gsi_ctx->base +
			GSI_V2_9_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
}

static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
		uint8_t erindex)
{
	uint32_t val;
	uint32_t prot;
	uint32_t prot_msb;

	switch (props->prot) {
	case GSI_CHAN_PROT_MHI:
	case GSI_CHAN_PROT_XHCI:
	case GSI_CHAN_PROT_GPI:
	case GSI_CHAN_PROT_XDCI:
	case GSI_CHAN_PROT_WDI2:
	case GSI_CHAN_PROT_WDI3:
	case GSI_CHAN_PROT_GCI:
	case GSI_CHAN_PROT_MHIP:
		prot_msb = 0;
		break;
	case GSI_CHAN_PROT_AQC:
	case GSI_CHAN_PROT_11AD:
		prot_msb = 1;
		break;
	default:
		GSIERR("Unsupported protocol %d\n", props->prot);
		WARN_ON(1);
		return;
	}
	prot = props->prot;

	val = ((prot <<
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT) &
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK);
	if (gsi_ctx->per.ver >= GSI_VER_2_5) {
		val |= ((prot_msb <<
			GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT) &
			GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK);
	}

	val |= (((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) &
			GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) |
		((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) &
			GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) |
		((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT) &
			GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee));

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		val = (props->ring_len &
			GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK)
			<< GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
		gsi_writel(val, gsi_ctx->base +
			GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id, ee));
	} else {
		val = (props->ring_len &
			GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK)
			<< GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
		gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id, ee));
	}

	val = (props->ring_base_addr &
		GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee));

	val = ((props->ring_base_addr >> 32) &
		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee));

	if (gsi_ctx->per.ver >= GSI_VER_2_9)
		gsi_program_chan_ctx_qos_v2_9(props, ee);
	else if (gsi_ctx->per.ver >= GSI_VER_2_5)
		gsi_program_chan_ctx_qos_v2_5(props, ee);
	else
		gsi_program_chan_ctx_qos(props, ee);
}

static void gsi_init_chan_ring(struct gsi_chan_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
}

static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
	uint64_t ra;
	uint64_t last;

	if (props->ch_id >= gsi_ctx->max_ch) {
		GSIERR("ch_id %u invalid\n", props->ch_id);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
				props->ring_len % 4) ||
			(props->re_size == GSI_CHAN_RE_SIZE_8B &&
				props->ring_len % 8) ||
			(props->re_size == GSI_CHAN_RE_SIZE_16B &&
				props->ring_len % 16) ||
			(props->re_size == GSI_CHAN_RE_SIZE_32B &&
				props->ring_len % 32)) {
		GSIERR("bad params ring_len %u not a multiple of re size %u\n",
				props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
				props->ring_base_addr,
				roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	last = props->ring_base_addr + props->ring_len - props->re_size;

	/* MSB should stay same within the ring */
	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
	    (last & 0xFFFFFFFF00000000ULL)) {
		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
				props->ring_base_addr,
				props->ring_len);
		return -GSI_STATUS_INVALID_PARAMS;
	}
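	/*
	 * The MSB check above exists because the upper 32 bits of the
	 * ring base are programmed once per ring (CNTXT_3); every ring
	 * element must therefore share them, i.e. the ring must not
	 * cross a 4 GB address boundary.
	 */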
	if (props->prot == GSI_CHAN_PROT_GPI &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->prot);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
		GSIERR("invalid channel low weight %u\n", props->low_weight);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
		GSIERR("xfer callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}

int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	uint32_t val;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
	uint8_t erindex;
	struct gsi_user_data *user_data;
	size_t user_data_size;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n",
				props, dev_hdl, chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_channel_props(props)) {
		GSIERR("bad params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->evt_ring_hdl != ~0) {
		if (props->evt_ring_hdl >= gsi_ctx->max_ev) {
			GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl);
			return -GSI_STATUS_INVALID_PARAMS;
		}

		if (atomic_read(
			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
		    gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
		    gsi_ctx->evtr[props->evt_ring_hdl].chan->props.prot !=
			GSI_CHAN_PROT_GCI) {
			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
					props->evt_ring_hdl, chan_hdl);
			return -GSI_STATUS_UNSUPPORTED_OP;
		}
	}

	ctx = &gsi_ctx->chan[props->ch_id];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", props->ch_id);
		return -GSI_STATUS_NODEV;
	}
	memset(ctx, 0, sizeof(*ctx));
	/* WDI channels offloaded to IPA do not need a per-TRE user_data array */
	if (props->prot != GSI_CHAN_PROT_WDI2 &&
	    props->prot != GSI_CHAN_PROT_WDI3)
		user_data_size = props->ring_len / props->re_size;
	else
		user_data_size = props->re_size;

	/*
	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
	 * user_data needs to be large enough to accommodate those.
	 * TODO: increase user data size if GSI_VEID_MAX is not enough
	 */
	if (props->prot == GSI_CHAN_PROT_GCI)
		user_data_size += GSI_VEID_MAX;

	user_data = devm_kzalloc(gsi_ctx->dev,
			user_data_size * sizeof(*user_data),
			GFP_KERNEL);
	if (user_data == NULL) {
		GSIERR("context not allocated\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
	ctx->props = *props;

	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		mutex_lock(&gsi_ctx->mlock);
		ee = gsi_ctx->per.ee;
		gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
		val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
				GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
			((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
				GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
		gsi_writel(val, gsi_ctx->base +
				GSI_EE_n_GSI_CH_CMD_OFFS(ee));
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%u timed out\n", props->ch_id);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
			GSIERR("chan_hdl=%u allocation failed state=%d\n",
					props->ch_id, ctx->state);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		ctx->state = GSI_CHAN_STATE_ALLOCATED;
		mutex_unlock(&gsi_ctx->mlock);
	}
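	/*
	 * Bind the channel to its event ring, if one was supplied. GCI
	 * channels share their event ring, so they neither take a
	 * reference on it nor claim exclusive ownership of it.
	 */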
	erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
		GSI_NO_EVT_ERINDEX;
	if (erindex != GSI_NO_EVT_ERINDEX && erindex >= GSI_EVT_RING_MAX) {
		GSIERR("invalid erindex %u\n", erindex);
		devm_kfree(gsi_ctx->dev, user_data);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (erindex < GSI_EVT_RING_MAX) {
		ctx->evtr = &gsi_ctx->evtr[erindex];
		if (props->prot != GSI_CHAN_PROT_GCI)
			atomic_inc(&ctx->evtr->chan_ref_cnt);
		if (props->prot != GSI_CHAN_PROT_GCI &&
		    ctx->evtr->props.exclusive &&
		    atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
			ctx->evtr->chan = ctx;
	}

	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_chan_ring(props, &ctx->ring);
	if (!props->max_re_expected)
		ctx->props.max_re_expected = ctx->ring.max_num_elem;
	ctx->user_data = user_data;
	*chan_hdl = props->ch_id;
	ctx->allocated = true;
	ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
	atomic_inc(&gsi_ctx->num_chan);

	if (props->prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = props->ch_id;
		gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_channel);

static int gsi_alloc_ap_channel(unsigned int chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	uint32_t val;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", chan_hdl);
		return -GSI_STATUS_NODEV;
	}

	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);

	mutex_lock(&gsi_ctx->mlock);
	ee = gsi_ctx->per.ee;
	gsi_ctx->ch_dbg[chan_hdl].ch_allocate++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_CMD_OFFS(ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%u timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%u allocation failed state=%d\n",
				chan_hdl, ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
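/*
 * Channel scratch is four consecutive 32-bit registers; word1..word4
 * of the packed union map to SCRATCH_0..SCRATCH_3 respectively.
 */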
static void __gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	gsi_writel(val.data.word1, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	gsi_writel(val.data.word2, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	gsi_writel(val.data.word3, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	gsi_writel(val.data.word4, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
			gsi_ctx->per.ee));
}

static void __gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	gsi_writel(val.data.word1, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
}

int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
		union __packed gsi_wdi_channel_scratch3_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.wdi.endp_metadatareg_offset =
		val.wdi.endp_metadatareg_offset;
	ctx->scratch.wdi.qmap_id = val.wdi.qmap_id;
	gsi_writel(val.data.word1, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);

int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi2_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.wdi2_new.endp_metadatareg_offset =
		val.wdi.endp_metadatareg_offset;
	ctx->scratch.wdi2_new.qmap_id = val.wdi.qmap_id;
	val.wdi.update_ri_moderation_threshold =
		ctx->scratch.wdi2_new.update_ri_moderation_threshold;
	gsi_writel(val.data.word1, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch2_reg);

static void __gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *val)
{
	val->data.word1 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	val->data.word2 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	val->data.word3 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	val->data.word4 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
			gsi_ctx->per.ee));
}

static void __gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg *val)
{
	val->data.word1 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
}

static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
		unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr)
{
	union __packed gsi_channel_scratch scr;
	/*
	 * The read-modify-write sequence below is not atomic; the
	 * assumption is that sequencer-specific fields remain unchanged
	 * across it.
	 */
	/* READ */
	scr.data.word1 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	scr.data.word2 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	scr.data.word3 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	scr.data.word4 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
			gsi_ctx->per.ee));

	/* UPDATE */
	scr.mhi.mhi_host_wp_addr = mscr.mhi_host_wp_addr;
	scr.mhi.assert_bit40 = mscr.assert_bit40;
	scr.mhi.polling_configuration = mscr.polling_configuration;
	scr.mhi.burst_mode_enabled = mscr.burst_mode_enabled;
	scr.mhi.polling_mode = mscr.polling_mode;
	scr.mhi.oob_mod_threshold = mscr.oob_mod_threshold;
	if (gsi_ctx->per.ver < GSI_VER_2_5) {
		scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre;
		scr.mhi.outstanding_threshold = mscr.outstanding_threshold;
	}

	/* WRITE */
	gsi_writel(scr.data.word1, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	gsi_writel(scr.data.word2, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	gsi_writel(scr.data.word3, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	gsi_writel(scr.data.word4, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
			gsi_ctx->per.ee));

	return scr;
}

int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
				gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch);

int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
				gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.data.word3 = val.data.word1;
	__gsi_write_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_wdi3_channel_scratch2_reg);

int gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
				gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	__gsi_read_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_read_channel_scratch);

int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg *val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
				gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	__gsi_read_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_read_wdi3_channel_scratch2_reg);

int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
		struct __packed gsi_mhi_channel_scratch mscr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
				gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_update_mhi_channel_scratch);

int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
				db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n",
				gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
		GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_db_addr);
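/*
 * Start a channel. Channels may be started from the ALLOCATED,
 * STOPPED or STOP_IN_PROC states; on success the channel reaches
 * STARTED and the local write pointer MSB is written to DOORBELL_1
 * (MSB before LSB, as elsewhere in this driver).
 */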
int gsi_start_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_START;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
	    ctx->state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
	if (ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
	    val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET_OFFS %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	GSIDBG("GSI Channel Start, waiting for completion\n");
	gsi_channel_state_change_wait(chan_hdl,
			ctx,
			GSI_START_CMD_TIMEOUT_MS, op);
  2531. if (ctx->state != GSI_CHAN_STATE_STARTED) {
  2532. /*
  2533. * Hardware returned unexpected status, unexpected
  2534. * hardware state.
  2535. */
  2536. GSIERR("chan=%lu timed out, unexpected state=%u\n",
  2537. chan_hdl, ctx->state);
  2538. GSI_ASSERT();
  2539. }
  2540. GSIDBG("GSI Channel=%lu Start success\n", chan_hdl);
  2541. /* write order MUST be MSB followed by LSB */
  2542. val = ((ctx->ring.wp_local >> 32) &
  2543. GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
  2544. GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
  2545. gsi_writel(val, gsi_ctx->base +
  2546. GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
  2547. gsi_ctx->per.ee));
  2548. mutex_unlock(&gsi_ctx->mlock);
  2549. return GSI_STATUS_SUCCESS;
  2550. }
  2551. EXPORT_SYMBOL(gsi_start_channel);
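
/*
 * gsi_stop_channel() - issue a GSI_CH_STOP command and wait for the channel
 * to stop. Returns success immediately if the channel is already STOPPED,
 * and -GSI_STATUS_AGAIN if the hardware reports STOP_IN_PROC, in which case
 * the caller is expected to retry.
 */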
int gsi_stop_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
		ctx->state != GSI_CHAN_STATE_ERROR) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
	if (ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
		val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET_OFFS %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));

	GSIDBG("GSI Channel Stop, waiting for completion\n");
	gsi_channel_state_change_wait(chan_hdl,
		ctx,
		GSI_STOP_CMD_TIMEOUT_MS, op);

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		BUG();
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_channel);
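
/*
 * gsi_stop_db_channel() - issue a GSI_CH_DB_STOP command (the doorbell-stop
 * variant of channel stop). Semantics mirror gsi_stop_channel(): success if
 * already STOPPED, -GSI_STATUS_AGAIN while the hardware reports
 * STOP_IN_PROC.
 */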
int gsi_stop_db_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl,
		msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_db_channel);
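
/*
 * gsi_reset_channel() - issue a GSI_CH_RESET command and reprogram the
 * channel. Expects the channel to be STOPPED (or, for the WDI3 SAP case
 * noted below, ALLOCATED). Re-verifies the ALLOCATED state with a short
 * retry loop, repeats the reset once for producer channels on pre-2.0
 * hardware, then restores the channel context, ring and scratch.
 */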
int gsi_reset_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;
	bool reset_done = false;
	uint32_t retry_cnt = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/*
	 * In the WDI3 case, if SAP is enabled but no client is connected,
	 * the channel remains in the ALLOCATED state. When SAP is disabled,
	 * gsi_reset_channel() is called and the reset is still needed.
	 */
	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);

reset:
	reinit_completion(&ctx->compl);
	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

revrfy_chnlstate:
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
			ctx->state);
		/*
		 * The GSI register state and the channel context state are
		 * not yet in sync; wait ~1 ms and re-check.
		 */
		retry_cnt++;
		if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
			usleep_range(GSI_RESET_WA_MIN_SLEEP,
				GSI_RESET_WA_MAX_SLEEP);
			goto revrfy_chnlstate;
		}
		/*
		 * Hardware still reports an incorrect state; treat this as
		 * a fatal hardware error.
		 */
		GSI_ASSERT();
	}

	/* the hardware issue is fixed from GSI 2.0, so the WA is not needed */
	if (gsi_ctx->per.ver >= GSI_VER_2_0)
		reset_done = true;

	/* workaround: reset GSI producers again */
	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
		reset_done = true;
		goto reset;
	}

	if (ctx->props.cleanup_cb)
		gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);

	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_channel);
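
/*
 * gsi_dealloc_channel() - release an ALLOCATED channel. On GSI_VER_2_2,
 * where the de-alloc command is not supported, only the software state is
 * cleared; otherwise a GSI_CH_DE_ALLOC command is issued and the channel
 * must end up NOT_ALLOCATED. Also drops the event ring reference and
 * coalescing bookkeeping and frees the user_data array.
 */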
int gsi_dealloc_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* channel deallocation is not supported on GSI_VER_2_2 */
	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		mutex_lock(&gsi_ctx->mlock);
		reinit_completion(&ctx->compl);

		gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
		val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
				GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
			((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
				GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
		gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
			GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
				ctx->state);
			/* Hardware returned incorrect value */
			GSI_ASSERT();
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		GSIDBG("In GSI_VER_2_2 channel deallocation not supported\n");
		ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED;
		GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl,
			ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
	}
	devm_kfree(gsi_ctx->dev, ctx->user_data);
	ctx->allocated = false;
	if (ctx->evtr && (ctx->props.prot != GSI_CHAN_PROT_GCI))
		atomic_dec(&ctx->evtr->chan_ref_cnt);
	atomic_dec(&gsi_ctx->num_chan);

	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
		gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_channel);

void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
{
	unsigned long now = jiffies_to_msecs(jiffies);
	unsigned long elapsed;

	if (used == 0) {
		elapsed = now - ctx->stats.dp.last_timestamp;
		if (ctx->stats.dp.empty_time < elapsed)
			ctx->stats.dp.empty_time = elapsed;
	}

	if (used <= ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_lo;
	else if (used <= 2 * ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_hi;
	else
		++ctx->stats.dp.ch_above_hi;
	ctx->stats.dp.last_timestamp = now;
}
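
/*
 * Compute the number of free ring elements between the (hardware or locally
 * cached) read pointer and the local write pointer. The ring holds
 * max_num_elem + 1 slots, one of which is kept unused so that rp == wp
 * unambiguously means "empty"; hence "used" wraps modulo max_num_elem + 1
 * and free = max_num_elem - used.
 */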
static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
	uint16_t *num_free_re)
{
	uint16_t start;
	uint16_t end;
	uint64_t rp;
	int ee = gsi_ctx->per.ee;
	uint16_t used;

	WARN_ON(ctx->props.prot != GSI_CHAN_PROT_GPI);

	if (!ctx->evtr) {
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
		ctx->ring.rp = rp;
	} else {
		rp = ctx->ring.rp_local;
	}

	start = gsi_find_idx_from_addr(&ctx->ring, rp);
	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);

	if (end >= start)
		used = end - start;
	else
		used = ctx->ring.max_num_elem + 1 - (start - end);

	*num_free_re = ctx->ring.max_num_elem - used;
}
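
/*
 * gsi_query_channel_info() - snapshot the channel's hardware read/write
 * pointers (CNTXT_4..7) and, when an event ring is attached, the event
 * ring pointers as well, all under the relevant ring spinlock.
 */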
int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !info) {
		GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->evtr) {
		slock = &ctx->evtr->ring.slock;
		info->evt_valid = true;
	} else {
		slock = &ctx->ring.slock;
		info->evt_valid = false;
	}

	spin_lock_irqsave(slock, flags);

	ee = gsi_ctx->per.ee;
	rp = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
	ctx->ring.rp = rp;
	info->rp = rp;

	wp = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
	ctx->ring.wp = wp;
	info->wp = wp;

	if (info->evt_valid) {
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
		rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee)))
			<< 32;
		info->evt_rp = rp;

		wp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
		wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee)))
			<< 32;
		info->evt_wp = wp;
	}

	spin_unlock_irqrestore(slock, flags);

	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
		chan_hdl, info->rp, info->wp,
		info->evt_valid, info->evt_rp, info->evt_wp);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_info);
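
/*
 * gsi_is_channel_empty() - check whether a GPI/GCI channel has nothing left
 * to process. For FROM_GSI (producer) channels with an event ring this
 * compares the event ring's local and hardware read pointers; otherwise the
 * channel's own rp/wp are compared.
 */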
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	uint64_t rp_local;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
		GSIERR("bad params chan_hdl=%lu is_empty=%pK\n",
			chan_hdl, is_empty);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr) {
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
		rp |= ctx->evtr->ring.rp & 0xFFFFFFFF00000000;
		ctx->evtr->ring.rp = rp;

		wp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
		wp |= ctx->evtr->ring.wp & 0xFFFFFFFF00000000;
		ctx->evtr->ring.wp = wp;

		rp_local = ctx->evtr->ring.rp_local;
	} else {
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
		ctx->ring.rp = rp;

		wp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
		wp |= ctx->ring.wp & 0xFFFFFFFF00000000;
		ctx->ring.wp = wp;

		rp_local = ctx->ring.rp_local;
	}

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		*is_empty = (rp_local == rp);
	else
		*is_empty = (wp == rp);

	spin_unlock_irqrestore(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr)
		GSIDBG("ch=%lu ev=%d RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, ctx->evtr->id, rp, wp, rp_local);
	else
		GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, rp, wp, rp_local);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_is_channel_empty);
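
/*
 * Reserve a user_data slot ("cookie") for a GCI TRE. Normally the slot at
 * the TRE's own ring index is used; if that slot is still held by an
 * uncompleted TRE, fall back to the GSI_VEID_MAX escape slots placed just
 * past the ring, and as a last resort scan the whole array. Running out of
 * slots entirely indicates a stall and is fatal (BUG).
 */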
int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
{
	int i;
	int end;

	if (!ctx->user_data[idx].valid) {
		ctx->user_data[idx].valid = true;
		return idx;
	}

	/*
	 * At this point we need to find an "escape buffer" slot for the
	 * cookie, as the userdata in this spot is in use. This happens if
	 * the TRE at idx is not completed yet and is getting reused by a
	 * new TRE.
	 */
	ctx->stats.userdata_in_use++;
	end = ctx->ring.max_num_elem + 1;
	for (i = 0; i < GSI_VEID_MAX; i++) {
		if (!ctx->user_data[end + i].valid) {
			ctx->user_data[end + i].valid = true;
			return end + i;
		}
	}

	/* Go over original userdata when the escape buffer is full (costly) */
	GSIDBG("escape buffer is full\n");
	for (i = 0; i < end; i++) {
		if (!ctx->user_data[i].valid) {
			ctx->user_data[i].valid = true;
			return i;
		}
	}

	/* Everything is full (possibly a stall) */
	GSIERR("both the userdata array and the escape buffer are full\n");
	BUG();
	return 0xFFFF;
}
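
/*
 * Build a coalescing (GCI) TRE at the local write pointer. GCI TREs carry
 * a cookie instead of a per-index user_data entry, so the buffer address
 * is limited to 40 bits and only GSI_XFER_ELEM_DATA is accepted.
 */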
int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
	struct gsi_xfer_elem *xfer)
{
	struct gsi_gci_tre gci_tre;
	struct gsi_gci_tre *tre_gci_ptr;
	uint16_t idx;

	memset(&gci_tre, 0, sizeof(gci_tre));
	if (xfer->addr & 0xFFFFFF0000000000) {
		GSIERR("chan_hdl=%u addr too large=%llx\n",
			ctx->props.ch_id, xfer->addr);
		return -EINVAL;
	}

	if (xfer->type != GSI_XFER_ELEM_DATA) {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_gci_ptr = (struct gsi_gci_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	gci_tre.buffer_ptr = xfer->addr;
	gci_tre.buf_len = xfer->len;
	gci_tre.re_type = GSI_RE_COAL;
	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
		return -EPERM;

	/* write the TRE to ring */
	*tre_gci_ptr = gci_tre;
	ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;

	return 0;
}
int __gsi_populate_tre(struct gsi_chan_ctx *ctx,
	struct gsi_xfer_elem *xfer)
{
	struct gsi_tre tre;
	struct gsi_tre *tre_ptr;
	uint16_t idx;

	memset(&tre, 0, sizeof(tre));
	tre.buffer_ptr = xfer->addr;
	tre.buf_len = xfer->len;
	if (xfer->type == GSI_XFER_ELEM_DATA) {
		tre.re_type = GSI_RE_XFER;
	} else if (xfer->type == GSI_XFER_ELEM_IMME_CMD) {
		tre.re_type = GSI_RE_IMMD_CMD;
	} else if (xfer->type == GSI_XFER_ELEM_NOP) {
		tre.re_type = GSI_RE_NOP;
	} else {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	tre.bei = (xfer->flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
	tre.ieot = (xfer->flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
	tre.ieob = (xfer->flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
	tre.chain = (xfer->flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	/* write the TRE to ring */
	*tre_ptr = tre;
	ctx->user_data[idx].valid = true;
	ctx->user_data[idx].p = xfer->xfer_user_data;

	return 0;
}
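
/*
 * gsi_queue_xfer() - queue up to num_xfers transfer elements on a GPI or
 * GCI channel and optionally ring the doorbell. With num_xfers == 0 the
 * call just rings the doorbell. For GPI, free space is verified first; for
 * GCI the caller must guarantee room. On any per-element failure the local
 * write pointer is rolled back and the whole batch is rejected.
 */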
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	struct gsi_chan_ctx *ctx;
	uint16_t free;
	uint64_t wp_rollback;
	int i;
	spinlock_t *slock;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
			chan_hdl, num_xfers, xfer);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (unlikely(gsi_ctx->chan[chan_hdl].state
			== GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	/* with num_xfers == 0, just ring the doorbell */
	if (!num_xfers)
		goto ring_doorbell;

	/*
	 * For GCI channels the responsibility is on the caller to make sure
	 * there is enough room in the TRE ring.
	 */
	if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
		__gsi_query_channel_free_re(ctx, &free);
		if (num_xfers > free) {
			GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
				chan_hdl, num_xfers, free);
			spin_unlock_irqrestore(slock, flags);
			return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
		}
	}

	wp_rollback = ctx->ring.wp_local;
	for (i = 0; i < num_xfers; i++) {
		if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
			if (__gsi_populate_gci_tre(ctx, &xfer[i]))
				break;
		} else {
			if (__gsi_populate_tre(ctx, &xfer[i]))
				break;
		}
		gsi_incr_ring_wp(&ctx->ring);
	}

	if (i != num_xfers) {
		/* reject all the xfers */
		ctx->ring.wp_local = wp_rollback;
		spin_unlock_irqrestore(slock, flags);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx->stats.queued += num_xfers;

ring_doorbell:
	if (ring_db) {
		/* ensure TRE is set before ringing doorbell */
		wmb();
		gsi_ring_chan_doorbell(ctx);
	}

	spin_unlock_irqrestore(slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_queue_xfer);
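
/*
 * Illustrative GPI data-path flow (a minimal sketch, not taken from this
 * file; error handling and the gsi_chan_props/event-ring setup are omitted
 * and depend on the peripheral):
 *
 *	struct gsi_xfer_elem xfer = {
 *		.addr = dma_addr,		// device-visible buffer
 *		.len = len,
 *		.type = GSI_XFER_ELEM_DATA,
 *		.flags = GSI_XFER_FLAG_EOT,	// interrupt on completion
 *		.xfer_user_data = cookie,
 *	};
 *
 *	gsi_start_channel(chan_hdl);
 *	gsi_queue_xfer(chan_hdl, 1, &xfer, true);	// ring doorbell now
 *	...
 *	gsi_poll_channel(chan_hdl, &notify);		// when in poll mode
 */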
int gsi_start_xfer(unsigned long chan_hdl)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->ring.wp == ctx->ring.wp_local)
		return GSI_STATUS_SUCCESS;

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_xfer);

int gsi_poll_channel(unsigned long chan_hdl,
	struct gsi_chan_xfer_notify *notify)
{
	int unused_var;

	return gsi_poll_n_channel(chan_hdl, notify, 1, &unused_var);
}
EXPORT_SYMBOL(gsi_poll_channel);
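
/*
 * gsi_poll_n_channel() - poll up to expected_num completed events from the
 * channel's event ring. If the cached read pointer shows no work, the
 * hardware rp is re-read; a second re-read after clearing the IEOB
 * interrupt closes the race with a just-arrived completion before
 * returning GSI_STATUS_POLL_EMPTY.
 */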
int gsi_poll_n_channel(unsigned long chan_hdl,
	struct gsi_chan_xfer_notify *notify,
	int expected_num, int *actual_num)
{
	struct gsi_chan_ctx *ctx;
	uint64_t rp;
	int ee;
	int i;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !notify ||
		!actual_num || expected_num <= 0) {
		GSIERR("bad params chan_hdl=%lu notify=%pK\n",
			chan_hdl, notify);
		GSIERR("actual_num=%pK expected_num=%d\n",
			actual_num, expected_num);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr) {
		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
		/* update rp to see if we have anything new to process */
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
		ctx->evtr->ring.rp = rp;

		/* read the event ring rp again if the last read was empty */
		if (rp == ctx->evtr->ring.rp_local) {
			/* event ring is empty */
			gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
			/* do another read to close a small window */
			__iowmb();
			rp = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(
					ctx->evtr->id, ee));
			rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
			ctx->evtr->ring.rp = rp;
			if (rp == ctx->evtr->ring.rp_local) {
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock,
					flags);
				ctx->stats.poll_empty++;
				return GSI_STATUS_POLL_EMPTY;
			}
		}
	}

	*actual_num = gsi_get_complete_num(&ctx->evtr->ring,
		ctx->evtr->ring.rp_local, ctx->evtr->ring.rp);
	if (*actual_num > expected_num)
		*actual_num = expected_num;
	for (i = 0; i < *actual_num; i++)
		gsi_process_evt_re(ctx->evtr, notify + i, false);
	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
	ctx->stats.poll_ok++;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_poll_n_channel);
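
/*
 * gsi_config_channel_mode() - switch an event-ring-backed channel between
 * callback (IRQ) and poll mode by masking/unmasking its IEOB interrupt. On
 * GSI 2.2/2.5 a missed-interrupt workaround re-checks the IEOB source
 * register after unmasking and falls back to poll mode (returning
 * -GSI_STATUS_PENDING_IRQ) if an interrupt was already pending.
 */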
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
{
	struct gsi_chan_ctx *ctx, *coal_ctx;
	enum gsi_chan_mode curr;
	unsigned long flags;
	enum gsi_chan_mode chan_mode;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
		GSIERR("cannot configure mode on chan_hdl=%lu\n",
			chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (atomic_read(&ctx->poll_mode))
		curr = GSI_CHAN_MODE_POLL;
	else
		curr = GSI_CHAN_MODE_CALLBACK;

	if (mode == curr) {
		GSIDBG("already in requested mode %u chan_hdl=%lu\n",
			curr, chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	if (curr == GSI_CHAN_MODE_CALLBACK &&
		mode == GSI_CHAN_MODE_POLL) {
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
		atomic_set(&ctx->poll_mode, mode);
		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
			atomic_set(&ctx->evtr->chan->poll_mode, mode);
		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
			if (coal_ctx != NULL)
				atomic_set(&coal_ctx->poll_mode, mode);
		}
		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
			ctx->evtr->id, mode);
		ctx->stats.callback_to_poll++;
	}

	if (curr == GSI_CHAN_MODE_POLL &&
		mode == GSI_CHAN_MODE_CALLBACK) {
		atomic_set(&ctx->poll_mode, mode);
		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
			atomic_set(&ctx->evtr->chan->poll_mode, mode);
		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
			if (coal_ctx != NULL)
				atomic_set(&coal_ctx->poll_mode, mode);
		}
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
			ctx->evtr->id, mode);

		/*
		 * In GSI 2.2 and 2.5 there is a limitation that can lead
		 * to losing an interrupt. For these versions an explicit
		 * check is needed after enabling the interrupt.
		 */
		if ((gsi_ctx->per.ver == GSI_VER_2_2 ||
			gsi_ctx->per.ver == GSI_VER_2_5) &&
			!gsi_ctx->per.skip_ieob_mask_wa) {
			u32 src = gsi_readl(gsi_ctx->base +
				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(
					gsi_ctx->per.ee));
			if (src & (1 << ctx->evtr->id)) {
				__gsi_config_ieob_irq(
					gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
				gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
					GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(
						gsi_ctx->per.ee));
				spin_unlock_irqrestore(&gsi_ctx->slock, flags);
				spin_lock_irqsave(&ctx->evtr->ring.slock,
					flags);
				chan_mode = atomic_xchg(&ctx->poll_mode,
					GSI_CHAN_MODE_POLL);
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock, flags);
				ctx->stats.poll_pending_irq++;
				GSIDBG("IEOB WA pnd cnt = %ld prvmode = %d\n",
					ctx->stats.poll_pending_irq,
					chan_mode);
				if (chan_mode == GSI_CHAN_MODE_POLL)
					return GSI_STATUS_SUCCESS;
				else
					return -GSI_STATUS_PENDING_IRQ;
			}
		}
		ctx->stats.poll_to_callback++;
	}
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_config_channel_mode);
int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_channel_cfg);

int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_channel_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.ch_id != props->ch_id ||
		ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_set_channel_cfg);

static void gsi_configure_ieps(void *base, enum gsi_ver ver)
{
	void __iomem *gsi_base = base;

	gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
	gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
	gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
	gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
	gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
	gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
	gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS);
	gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
	gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
	gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
	gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
	gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
	gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
	gsi_writel(14, gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS);
	gsi_writel(15, gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS);
	gsi_writel(16, gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS);

	if (ver >= GSI_VER_2_5)
		gsi_writel(17,
			gsi_base + GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS);
}

static void gsi_configure_bck_prs_matrix(void *base)
{
	void __iomem *gsi_base = (void __iomem *) base;

	/*
	 * For now, these are default values. In the future, the GSI FW
	 * image will produce optimized back-pressure values.
	 */
	gsi_writel(0xfffffffe,
		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffefff,
		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS);
	gsi_writel(0x00000000,
		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
	gsi_writel(0x00000000,
		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
	gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS);
	gsi_writel(0xff03ffff,
		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS);
}

int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	gsi_writel(0, gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS);
	gsi_writel(per_base_addr,
		gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS);
	gsi_configure_bck_prs_matrix((void *)gsi_ctx->base);
	gsi_configure_ieps(gsi_ctx->base, ver);

	return 0;
}
EXPORT_SYMBOL(gsi_configure_regs);
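
/*
 * gsi_enable_fw() - temporarily map the GSI register space and write the
 * GSI/MCS enable configuration. From GSI 1.2 the MCS enable moves to its
 * own MCS_CFG register; from 2.5 a sleep-clock divider is also programmed
 * (see the divided-by-3 note below).
 */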
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	void __iomem *gsi_base;
	uint32_t value;

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
	if (!gsi_base) {
		GSIERR("ioremap failed\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	/* Enable the MCS and set to x2 clocks */
	if (ver >= GSI_VER_1_2) {
		value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
			GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
		gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);

		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
				GSI_GSI_CFG_GSI_ENABLE_BMSK) |
			((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
				GSI_GSI_CFG_MCS_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
				GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
				GSI_GSI_CFG_UC_IS_MCS_BMSK) |
			((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
				GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
			((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
				GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
	} else {
		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
				GSI_GSI_CFG_GSI_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
				GSI_GSI_CFG_MCS_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
				GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
				GSI_GSI_CFG_UC_IS_MCS_BMSK));
	}

	/* GSI frequency is peripheral frequency divided by 3 (2+1) */
	if (ver >= GSI_VER_2_5)
		value |= ((2 << GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT) &
			GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK);
	gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);

	iounmap(gsi_base);

	return 0;
}
EXPORT_SYMBOL(gsi_enable_fw);

void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
	unsigned long *size, enum gsi_ver ver)
{
	unsigned long maxn;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	switch (ver) {
	case GSI_VER_1_0:
	case GSI_VER_1_2:
	case GSI_VER_1_3:
		maxn = GSI_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_0:
		maxn = GSI_V2_0_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_2:
		maxn = GSI_V2_2_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_5:
		maxn = GSI_V2_5_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_7:
		maxn = GSI_V2_7_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_9:
		maxn = GSI_V2_9_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_ERR:
	case GSI_VER_MAX:
	default:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		return;
	}

	if (size)
		*size = GSI_GSI_INST_RAM_n_WORD_SZ * (maxn + 1);

	if (base_offset) {
		if (ver < GSI_VER_2_5)
			*base_offset = GSI_GSI_INST_RAM_n_OFFS(0);
		else
			*base_offset = GSI_V2_5_GSI_INST_RAM_n_OFFS(0);
	}
}
EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
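
/*
 * gsi_halt_channel_ee() - ask the GSI firmware (via the generic EE command
 * register) to halt a channel owned by another EE. The result comes back
 * through the EE scratch register: 0 means no response, RETRY maps to
 * -GSI_STATUS_AGAIN, anything else is returned through *code.
 */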
int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
	uint32_t val;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));

	gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_RETRY) {
		GSIDBG("chan_idx=%u ee=%u busy try again\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_RETRY;
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_halt_channel_ee);
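
/*
 * gsi_alloc_channel_ee() - allocate a channel on behalf of another EE via
 * the generic EE command. For ee == 0 the request is handled locally
 * through gsi_alloc_ap_channel(). OUT_OF_RESOURCES from firmware maps to
 * -GSI_STATUS_RES_ALLOC_FAILURE.
 */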
int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL;
	struct gsi_chan_ctx *ctx;
	uint32_t val;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (ee == 0)
		return gsi_alloc_ap_channel(chan_idx);

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));

	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) {
		GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}
	if (ee == 0) {
		ctx = &gsi_ctx->chan[chan_idx];
		gsi_ctx->ch_dbg[chan_idx].ch_allocate++;
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_alloc_channel_ee);
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	gsi_writel(per_ep_index,
		gsi_ctx->base +
		GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(chan_num, ee));

	return 0;
}
EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep);

void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
	uint32_t db_addr_low, uint32_t db_addr_high)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		gsi_writel(db_addr_low, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_ring_hdl,
				gsi_ctx->per.ee));
		gsi_writel(db_addr_high, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_ring_hdl,
				gsi_ctx->per.ee));
	} else {
		gsi_writel(db_addr_low, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_ring_hdl,
				gsi_ctx->per.ee));
		gsi_writel(db_addr_high, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_ring_hdl,
				gsi_ctx->per.ee));
	}
}
EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);

void gsi_wdi3_dump_register(unsigned long chan_hdl)
{
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}
	GSIDBG("reg dump ch id %lu\n", chan_hdl);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_QOS_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_QOS_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
			gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS 0x%x\n", val);
}
EXPORT_SYMBOL(gsi_wdi3_dump_register);

static int msm_gsi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pr_debug("gsi_probe\n");
	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
	if (!gsi_ctx) {
		dev_err(dev, "failed to allocate gsi context\n");
		return -ENOMEM;
	}

	gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
		"gsi", 0);
	if (gsi_ctx->ipc_logbuf == NULL)
		GSIERR("failed to create IPC log, continue...\n");

	gsi_ctx->dev = dev;
	init_completion(&gsi_ctx->gen_ee_cmd_compl);
	gsi_debugfs_init();

	return 0;
}

static struct platform_driver msm_gsi_driver = {
	.probe = msm_gsi_probe,
	.driver = {
		.name = "gsi",
		.of_match_table = msm_gsi_match,
	},
};

static struct platform_device *pdev;

/*
 * Module init.
 */
static int __init gsi_init(void)
{
	int ret;

	pr_debug("%s\n", __func__);
	ret = platform_driver_register(&msm_gsi_driver);
	if (ret < 0)
		goto out;

	if (running_emulation) {
		pdev = platform_device_register_simple("gsi", -1, NULL, 0);
		if (IS_ERR(pdev)) {
			ret = PTR_ERR(pdev);
			platform_driver_unregister(&msm_gsi_driver);
			goto out;
		}
	}

out:
	return ret;
}
arch_initcall(gsi_init);

/*
 * Module exit.
 */
static void __exit gsi_exit(void)
{
	if (running_emulation && pdev)
		platform_device_unregister(pdev);
	platform_driver_unregister(&msm_gsi_driver);
}
module_exit(gsi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Software Interface (GSI)");