gsi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msm_gsi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include "gsi.h"
#include "gsi_emulation.h"
#include "gsihal.h"
#include <asm/arch_timer.h>
#include <linux/sched/clock.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>
#define GSI_CMD_TIMEOUT (5*HZ)
#define GSI_START_CMD_TIMEOUT_MS 1000
#define GSI_CMD_POLL_CNT 5
#define GSI_STOP_CMD_TIMEOUT_MS 200
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_IRQ_STORM_THR 5
#define GSI_STOP_CMD_POLL_CNT 4
#define GSI_STOP_IN_PROC_CMD_POLL_CNT 2
#define GSI_RESET_WA_MIN_SLEEP 1000
#define GSI_RESET_WA_MAX_SLEEP 2000
#define GSI_CHNL_STATE_MAX_RETRYCNT 10
#define GSI_STTS_REG_BITS 32
#define GSI_MSB_MASK 0xFFFFFFFF00000000ULL
#define GSI_LSB_MASK 0x00000000FFFFFFFFULL
#define GSI_MSB(num) ((u32)((num & GSI_MSB_MASK) >> 32))
#define GSI_LSB(num) ((u32)(num & GSI_LSB_MASK))
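
/*
 * Example: for num = 0x1122334455667788ULL, GSI_MSB(num) == 0x11223344
 * and GSI_LSB(num) == 0x55667788. The doorbell helpers further down use
 * GSI_LSB() to write the low half of a 64-bit ring pointer into the
 * *_DOORBELL_0 registers.
 */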

#ifndef CONFIG_DEBUG_FS
void gsi_debugfs_init(void)
{
}
#endif

static const struct of_device_id msm_gsi_match[] = {
    { .compatible = "qcom,msm_gsi", },
    { },
};

#if defined(CONFIG_IPA_EMULATION)
static bool running_emulation = true;
#else
static bool running_emulation;
#endif

struct gsi_ctx *gsi_ctx;

static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
    unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr);
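
/*
 * The __gsi_config_*_irq() helpers below share one read-modify-write
 * pattern: (curr & ~mask) | (val & mask) takes the bits selected by
 * @mask from @val and preserves every other bit. For example, with
 * curr = 0xA, mask = 0x6 and val = 0x4, the written value is 0xC:
 * bits 1-2 come from val, bits 0 and 3 keep their current values.
 */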
static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr;

    curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ_MSK, ee);
    gsihal_write_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ_MSK, ee,
        (curr & ~mask) | (val & mask));
}

static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr;

    curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK, ee);
    gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK, ee,
        (curr & ~mask) | (val & mask));
}

static void __gsi_config_all_ch_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr, k, max_k;

    max_k = gsihal_get_bit_map_array_size();
    for (k = 0; k < max_k; k++) {
        curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_k, ee, k);
        gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_k, ee, k,
            (curr & ~mask) | (val & mask));
    }
}

static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr;

    curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK, ee);
    gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK, ee,
        (curr & ~mask) | (val & mask));
}

static void __gsi_config_all_evt_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr, k, max_k;

    max_k = gsihal_get_bit_map_array_size();
    for (k = 0; k < max_k; k++) {
        curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_k, ee, k);
        gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_k, ee, k,
            (curr & ~mask) | (val & mask));
    }
}

static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr;

    curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee);
    gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee,
        (curr & ~mask) | (val & mask));
    GSIDBG("current IEOB_IRQ_MSK: 0x%x, change to: 0x%x\n",
        curr, ((curr & ~mask) | (val & mask)));
}

static void __gsi_config_all_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr, k, max_k;

    max_k = gsihal_get_bit_map_array_size();
    for (k = 0; k < max_k; k++) {
        curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
        gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k,
            (curr & ~mask) | (val & mask));
        GSIDBG("current IEOB_IRQ_MSK: 0x%x, change to: 0x%x\n",
            curr, ((curr & ~mask) | (val & mask)));
    }
}

static void __gsi_config_ieob_irq_k(int ee, uint32_t k, uint32_t mask, uint32_t val)
{
    uint32_t curr;

    curr = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
    gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k,
        (curr & ~mask) | (val & mask));
    GSIDBG("current IEOB_IRQ_MSK: 0x%x, change to: 0x%x\n",
        curr, ((curr & ~mask) | (val & mask)));
}

static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr;

    curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_EN, ee);
    gsihal_write_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_EN, ee,
        (curr & ~mask) | (val & mask));
}

static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
{
    uint32_t curr;

    curr = gsihal_read_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_EN, ee);
    gsihal_write_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_EN, ee,
        (curr & ~mask) | (val & mask));
}
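
/*
 * gsi_channel_state_change_wait() - wait for a channel command to take
 * effect. Each poll round waits up to @tm ms for the command-completion
 * interrupt; if it does not arrive, the HW state is read back from
 * GSI_EE_n_GSI_CH_k_CNTXT_0 so the channel context reflects the real
 * hardware state even when the interrupt is delayed or lost.
 */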
static void gsi_channel_state_change_wait(unsigned long chan_hdl,
    struct gsi_chan_ctx *ctx,
    uint32_t tm, enum gsi_ch_cmd_opcode op)
{
    int poll_cnt;
    int gsi_pending_intr;
    int res;
    struct gsihal_reg_ctx_type_irq type;
    struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
    int ee = gsi_ctx->per.ee;
    enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
    int stop_in_proc_retry = 0;
    int stop_retry = 0;

    /*
     * Poll the GSI channel for up to tm * GSI_CMD_POLL_CNT in total.
     * Polling the GSI state improves debuggability of the GSI HW state.
     */
    for (poll_cnt = 0;
        poll_cnt < GSI_CMD_POLL_CNT;
        poll_cnt++) {
        res = wait_for_completion_timeout(&ctx->compl,
            msecs_to_jiffies(tm));

        /* Interrupt received, return */
        if (res != 0)
            return;

        gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_TYPE_IRQ, ee, &type);
        if (gsi_ctx->per.ver >= GSI_VER_3_0) {
            gsi_pending_intr = gsihal_read_reg_nk(
                GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_k,
                ee, gsihal_get_ch_reg_idx(chan_hdl));
        } else {
            gsi_pending_intr = gsihal_read_reg_n(
                GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ, ee);
        }

        /* Update the channel state only if the interrupt was raised
         * for this particular channel and the global channel-control
         * interrupt type is set.
         */
        if ((type.ch_ctrl) &&
            (gsi_pending_intr & gsihal_get_ch_reg_mask(chan_hdl))) {
            /*
             * Check the channel state here in case the channel is
             * already started but the interrupt is not yet received.
             */
            gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
                ee, chan_hdl, &ch_k_cntxt_0);
            curr_state = ch_k_cntxt_0.chstate;
        }

        if (op == GSI_CH_START) {
            if (curr_state == GSI_CHAN_STATE_STARTED ||
                curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
                ctx->state = curr_state;
                return;
            }
        }

        if (op == GSI_CH_STOP) {
            if (curr_state == GSI_CHAN_STATE_STOPPED)
                stop_retry++;
            else if (curr_state == GSI_CHAN_STATE_STOP_IN_PROC)
                stop_in_proc_retry++;
        }

        /* On the first stop/stop-in-proc observation, restart the
         * poll count so the loop keeps running until the stop and
         * stop-in-proc retry limits are reached.
         */
        if (stop_retry == 1 || stop_in_proc_retry == 1)
            poll_cnt = 0;

        /* If the stop retry count reached its limit, clear the
         * pending interrupt; the channel has already stopped.
         */
        if (stop_retry == GSI_STOP_CMD_POLL_CNT) {
            if (gsi_ctx->per.ver >= GSI_VER_3_0) {
                gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_k,
                    ee, gsihal_get_ch_reg_idx(chan_hdl),
                    gsi_pending_intr);
            } else {
                gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR,
                    ee,
                    gsi_pending_intr);
            }
            ctx->state = curr_state;
            return;
        }

        /* If the channel is still stopping, there is no need to
         * keep waiting; report the current state.
         */
        if (stop_in_proc_retry == GSI_STOP_IN_PROC_CMD_POLL_CNT) {
            ctx->state = curr_state;
            return;
        }

        GSIDBG("GSI wait on chan_hdl=%lu irqtyp=%u state=%u intr=%u\n",
            chan_hdl,
            type.ch_ctrl,
            ctx->state,
            gsi_pending_intr);
    }

    GSIDBG("invalidating the channel state when timeout happens\n");
    ctx->state = curr_state;
}
static void gsi_handle_ch_ctrl(int ee)
{
    uint32_t ch;
    int i, k, max_k;
    uint32_t ch_hdl;
    struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
    struct gsi_chan_ctx *ctx;

    if (gsi_ctx->per.ver >= GSI_VER_3_0) {
        max_k = gsihal_get_bit_map_array_size();
        for (k = 0; k < max_k; k++) {
            ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_k, ee, k);
            gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_k, ee, k, ch);
            GSIDBG("ch %x\n", ch);
            for (i = 0; i < GSI_STTS_REG_BITS; i++) {
                if ((1 << i) & ch) {
                    ch_hdl = i + (GSI_STTS_REG_BITS * k);
                    if (ch_hdl >= gsi_ctx->max_ch ||
                        ch_hdl >= GSI_CHAN_MAX) {
                        GSIERR("invalid channel %d\n",
                            ch_hdl);
                        break;
                    }
                    ctx = &gsi_ctx->chan[ch_hdl];
                    gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
                        ee, ch_hdl, &ch_k_cntxt_0);
                    ctx->state = ch_k_cntxt_0.chstate;
                    GSIDBG("ch %u state updated to %u\n",
                        ch_hdl, ctx->state);
                    complete(&ctx->compl);
                    gsi_ctx->ch_dbg[ch_hdl].cmd_completed++;
                }
            }
        }
    } else {
        ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ, ee);
        gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR, ee, ch);
        GSIDBG("ch %x\n", ch);
        for (i = 0; i < GSI_STTS_REG_BITS; i++) {
            if ((1 << i) & ch) {
                if (i >= gsi_ctx->max_ch ||
                    i >= GSI_CHAN_MAX) {
                    GSIERR("invalid channel %d\n", i);
                    break;
                }
                ctx = &gsi_ctx->chan[i];
                gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
                    ee, i, &ch_k_cntxt_0);
                ctx->state = ch_k_cntxt_0.chstate;
                GSIDBG("ch %u state updated to %u\n", i,
                    ctx->state);
                complete(&ctx->compl);
                gsi_ctx->ch_dbg[i].cmd_completed++;
            }
        }
    }
}

static void gsi_handle_ev_ctrl(int ee)
{
    uint32_t ch;
    int i, k;
    uint32_t evt_hdl, max_k;
    struct gsi_evt_ctx *ctx;
    struct gsihal_reg_ev_ch_k_cntxt_0 ev_ch_k_cntxt_0;

    if (gsi_ctx->per.ver >= GSI_VER_3_0) {
        max_k = gsihal_get_bit_map_array_size();
        for (k = 0; k < max_k; k++) {
            ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_k, ee, k);
            gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_k, ee, k, ch);
            GSIDBG("ev %x\n", ch);
            for (i = 0; i < GSI_STTS_REG_BITS; i++) {
                if ((1 << i) & ch) {
                    evt_hdl = i + (GSI_STTS_REG_BITS * k);
                    if (evt_hdl >= gsi_ctx->max_ev ||
                        evt_hdl >= GSI_EVT_RING_MAX) {
                        GSIERR("invalid event %d\n",
                            evt_hdl);
                        break;
                    }
                    ctx = &gsi_ctx->evtr[evt_hdl];
                    gsihal_read_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
                        ee, evt_hdl, &ev_ch_k_cntxt_0);
                    ctx->state = ev_ch_k_cntxt_0.chstate;
                    GSIDBG("evt %u state updated to %u\n",
                        evt_hdl, ctx->state);
                    complete(&ctx->compl);
                }
            }
        }
    } else {
        ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ, ee);
        gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR, ee, ch);
        GSIDBG("ev %x\n", ch);
        for (i = 0; i < GSI_STTS_REG_BITS; i++) {
            if ((1 << i) & ch) {
                if (i >= gsi_ctx->max_ev ||
                    i >= GSI_EVT_RING_MAX) {
                    GSIERR("invalid event %d\n", i);
                    break;
                }
                ctx = &gsi_ctx->evtr[i];
                gsihal_read_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
                    ee, i, &ev_ch_k_cntxt_0);
                ctx->state = ev_ch_k_cntxt_0.chstate;
                GSIDBG("evt %u state updated to %u\n", i,
                    ctx->state);
                complete(&ctx->compl);
            }
        }
    }
}
static void gsi_handle_glob_err(uint32_t err)
{
    struct gsi_log_err *log;
    struct gsi_chan_ctx *ch;
    struct gsi_evt_ctx *ev;
    struct gsi_chan_err_notify chan_notify;
    struct gsi_evt_err_notify evt_notify;
    struct gsi_per_notify per_notify;
    enum gsi_err_type err_type;
    struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;

    log = (struct gsi_log_err *)&err;
    GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
        log->virt_idx);
    GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
        log->arg2, log->arg3);

    err_type = log->err_type;
    /*
     * These are errors thrown by hardware. We need
     * BUG_ON() to capture the hardware state right
     * when it is unexpected.
     */
    switch (err_type) {
    case GSI_ERR_TYPE_GLOB:
        per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
        per_notify.user_data = gsi_ctx->per.user_data;
        per_notify.data.err_desc = err & 0xFFFF;
        gsi_ctx->per.notify_cb(&per_notify);
        break;
    case GSI_ERR_TYPE_CHAN:
        if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) {
            GSIERR("Unexpected ch %d\n", log->virt_idx);
            return;
        }
        ch = &gsi_ctx->chan[log->virt_idx];
        chan_notify.chan_user_data = ch->props.chan_user_data;
        chan_notify.err_desc = err & 0xFFFF;
        if (log->code == GSI_INVALID_TRE_ERR) {
            if (log->ee != gsi_ctx->per.ee) {
                GSIERR("unexpected EE in event %d\n", log->ee);
                GSI_ASSERT();
            }
            gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
                gsi_ctx->per.ee, log->virt_idx, &ch_k_cntxt_0);
            ch->state = ch_k_cntxt_0.chstate;
            GSIDBG("ch %u state updated to %u\n", log->virt_idx,
                ch->state);
            ch->stats.invalid_tre_error++;
            if (ch->state == GSI_CHAN_STATE_ERROR) {
                GSIERR("Unexpected channel state %d\n",
                    ch->state);
                GSI_ASSERT();
            }
            chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
        } else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
            if (log->ee != gsi_ctx->per.ee) {
                GSIERR("unexpected EE in event %d\n", log->ee);
                GSI_ASSERT();
            }
            chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
        } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
            if (log->ee != gsi_ctx->per.ee) {
                GSIERR("unexpected EE in event %d\n", log->ee);
                GSI_ASSERT();
            }
            chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
            complete(&ch->compl);
        } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
            chan_notify.evt_id =
                GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
        } else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
            if (log->ee != gsi_ctx->per.ee) {
                GSIERR("unexpected EE in event %d\n", log->ee);
                GSI_ASSERT();
            }
            chan_notify.evt_id =
                GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
        } else if (log->code == GSI_HWO_1_ERR) {
            if (log->ee != gsi_ctx->per.ee) {
                GSIERR("unexpected EE in event %d\n", log->ee);
                GSI_ASSERT();
            }
            chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
        } else {
            GSIERR("unexpected event log code %d\n", log->code);
            GSI_ASSERT();
        }
        ch->props.err_cb(&chan_notify);
        break;
    case GSI_ERR_TYPE_EVT:
        if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) {
            GSIERR("Unexpected ev %d\n", log->virt_idx);
            return;
        }
        ev = &gsi_ctx->evtr[log->virt_idx];
        evt_notify.user_data = ev->props.user_data;
        evt_notify.err_desc = err & 0xFFFF;
        if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
            if (log->ee != gsi_ctx->per.ee) {
                GSIERR("unexpected EE in event %d\n", log->ee);
                GSI_ASSERT();
            }
            evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
        } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
            if (log->ee != gsi_ctx->per.ee) {
                GSIERR("unexpected EE in event %d\n", log->ee);
                GSI_ASSERT();
            }
            evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
            complete(&ev->compl);
        } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
            evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
        } else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
            if (log->ee != gsi_ctx->per.ee) {
                GSIERR("unexpected EE in event %d\n", log->ee);
                GSI_ASSERT();
            }
            evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
        } else {
            GSIERR("unexpected event log code %d\n", log->code);
            GSI_ASSERT();
        }
        ev->props.err_cb(&evt_notify);
        break;
    }
}
static void gsi_handle_gp_int1(void)
{
    complete(&gsi_ctx->gen_ee_cmd_compl);
}

static void gsi_handle_glob_ee(int ee)
{
    uint32_t val;
    uint32_t err;
    struct gsi_per_notify notify;
    uint32_t clr = ~0;
    struct gsihal_reg_cntxt_glob_irq_stts cntxt_glob_irq_stts;

    val = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_GLOB_IRQ_STTS,
        ee, &cntxt_glob_irq_stts);
    notify.user_data = gsi_ctx->per.user_data;

    if (cntxt_glob_irq_stts.error_int) {
        err = gsihal_read_reg_n(GSI_EE_n_ERROR_LOG, ee);
        if (gsi_ctx->per.ver >= GSI_VER_1_2)
            gsihal_write_reg_n(GSI_EE_n_ERROR_LOG, ee, 0);
        gsihal_write_reg_n(GSI_EE_n_ERROR_LOG_CLR, ee, clr);
        gsi_handle_glob_err(err);
    }

    if (cntxt_glob_irq_stts.gp_int1)
        gsi_handle_gp_int1();

    if (cntxt_glob_irq_stts.gp_int2) {
        notify.evt_id = GSI_PER_EVT_GLOB_GP2;
        gsi_ctx->per.notify_cb(&notify);
    }

    if (cntxt_glob_irq_stts.gp_int3) {
        notify.evt_id = GSI_PER_EVT_GLOB_GP3;
        gsi_ctx->per.notify_cb(&notify);
    }

    gsihal_write_reg_n(GSI_EE_n_CNTXT_GLOB_IRQ_CLR, ee, val);
}

static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
{
    ctx->wp_local += ctx->elem_sz;
    if (ctx->wp_local == ctx->end)
        ctx->wp_local = ctx->base;
}

static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
{
    ctx->rp_local += ctx->elem_sz;
    if (ctx->rp_local == ctx->end)
        ctx->rp_local = ctx->base;
}
uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
{
    WARN_ON(addr < ctx->base || addr >= ctx->end);
    return (uint32_t)(addr - ctx->base) / ctx->elem_sz;
}
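
/*
 * Ring pointer arithmetic: wp_local/rp_local advance by elem_sz and wrap
 * from end back to base, so a ring with base = 0x1000, end = 0x1100 and
 * elem_sz = 0x10 holds 16 elements at indices 0..15; for instance,
 * gsi_find_idx_from_addr() maps addr 0x1030 to index 3.
 */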
static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
    uint64_t addr2)
{
    uint32_t addr_diff;

    GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
        ctx->base, ctx->end);

    if (addr1 < ctx->base || addr1 >= ctx->end) {
        GSIERR("address = 0x%llx not in range\n", addr1);
        GSI_ASSERT();
    }

    if (addr2 < ctx->base || addr2 >= ctx->end) {
        GSIERR("address = 0x%llx not in range\n", addr2);
        GSI_ASSERT();
    }

    addr_diff = (uint32_t)(addr2 - addr1);
    if (addr1 < addr2)
        return addr_diff / ctx->elem_sz;
    else
        return (addr_diff + ctx->len) / ctx->elem_sz;
}
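
/*
 * Worked example for gsi_get_complete_num(): with base = 0x1000,
 * end = 0x1100, len = 0x100 and elem_sz = 0x10, the distance from
 * addr1 = 0x10F0 to addr2 = 0x1010 wraps past the end of the ring:
 * (uint32_t)(addr2 - addr1) == 0xFFFFFF20, and adding len yields 0x20
 * after the unsigned wrap, i.e. two completed elements (the ones at
 * 0x10F0 and 0x1000).
 */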
static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
    struct gsi_chan_xfer_notify *notify, bool callback)
{
    uint32_t ch_id;
    struct gsi_chan_ctx *ch_ctx;
    uint16_t rp_idx;
    uint64_t rp;

    ch_id = evt->chid;
    if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
        GSIERR("Unexpected ch %d\n", ch_id);
        return;
    }

    ch_ctx = &gsi_ctx->chan[ch_id];
    if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
        ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
        return;

    if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
        rp = evt->xfer_ptr;
        if (ch_ctx->ring.rp_local != rp) {
            ch_ctx->stats.completed +=
                gsi_get_complete_num(&ch_ctx->ring,
                    ch_ctx->ring.rp_local, rp);
            ch_ctx->ring.rp_local = rp;
        }

        /*
         * Increment the local RP only in the polling context to
         * avoid a sys len mismatch.
         */
        if (!(callback && ch_ctx->props.dir ==
            GSI_CHAN_DIR_FROM_GSI))
            /* the element at RP is also processed */
            gsi_incr_ring_rp(&ch_ctx->ring);

        ch_ctx->ring.rp = ch_ctx->ring.rp_local;
        rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
        notify->veid = GSI_VEID_DEFAULT;
    } else {
        rp_idx = evt->cookie;
        notify->veid = evt->veid;
    }

    WARN_ON(!ch_ctx->user_data[rp_idx].valid);
    notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;
    /*
     * During suspend, just before the channel is stopped, an IEOB
     * interrupt may arrive while the channel is moving to poll mode,
     * so the transfer pointer is not processed here. After resume,
     * once the channel is restarted, the next IEOB interrupt would
     * overwrite that transfer pointer. To avoid this, process all
     * data in the polling context.
     */
    if (!(callback && ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)) {
        ch_ctx->stats.completed++;
        ch_ctx->user_data[rp_idx].valid = false;
    }

    notify->chan_user_data = ch_ctx->props.chan_user_data;
    notify->evt_id = evt->code;
    notify->bytes_xfered = evt->len;

    if (callback) {
        if (atomic_read(&ch_ctx->poll_mode)) {
            GSIERR("Calling client callback in polling mode\n");
            WARN_ON(1);
        }
        ch_ctx->props.xfer_cb(notify);
    }
}
static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
    struct gsi_chan_xfer_notify *notify, bool callback)
{
    struct gsi_xfer_compl_evt *evt;
    struct gsi_chan_ctx *ch_ctx;

    evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
        ctx->ring.rp_local - ctx->ring.base);
    gsi_process_chan(evt, notify, callback);
    /*
     * Increment the local RP only in the polling context to avoid
     * a sys len mismatch.
     */
    ch_ctx = &gsi_ctx->chan[evt->chid];
    if (callback && ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
        return;
    gsi_incr_ring_rp(&ctx->ring);
    /* recycle this element */
    gsi_incr_ring_wp(&ctx->ring);
    ctx->stats.completed++;
}

static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
    uint32_t val;

    ctx->ring.wp = ctx->ring.wp_local;
    val = GSI_LSB(ctx->ring.wp_local);
    gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_DOORBELL_0,
        gsi_ctx->per.ee, ctx->id, val);
}

static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
{
    uint32_t val;

    /*
     * Allocate new events for this channel first, before submitting
     * the new TREs. For TO_GSI channels the event ring doorbell is
     * rung as part of interrupt handling.
     */
    if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
        gsi_ring_evt_doorbell(ctx->evtr);
    ctx->ring.wp = ctx->ring.wp_local;
    val = GSI_LSB(ctx->ring.wp_local);
    gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_0,
        gsi_ctx->per.ee, ctx->props.ch_id, val);
}
static void gsi_handle_ieob(int ee)
{
    uint32_t ch, evt_hdl;
    int i, k, max_k;
    uint64_t rp;
    struct gsi_evt_ctx *ctx;
    struct gsi_chan_xfer_notify notify;
    unsigned long flags;
    unsigned long cntr;
    uint32_t msk;
    bool empty;

    if (gsi_ctx->per.ver >= GSI_VER_3_0) {
        max_k = gsihal_get_bit_map_array_size();
        for (k = 0; k < max_k; k++) {
            ch = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_k, ee, k);
            msk = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
            gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee, k, ch & msk);
            for (i = 0; i < GSI_STTS_REG_BITS; i++) {
                if ((1 << i) & ch & msk) {
                    evt_hdl = i + (GSI_STTS_REG_BITS * k);
                    if (evt_hdl >= gsi_ctx->max_ev ||
                        evt_hdl >= GSI_EVT_RING_MAX) {
                        GSIERR("invalid event %d\n",
                            evt_hdl);
                        break;
                    }
                    ctx = &gsi_ctx->evtr[evt_hdl];
                    /*
                     * Don't handle MSI interrupts, only
                     * handle IEOB IRQs
                     */
                    if (ctx->props.intr == GSI_INTR_MSI)
                        continue;
                    if (ctx->props.intf !=
                        GSI_EVT_CHTYPE_GPI_EV) {
                        GSIERR("Unexpected irq intf %d\n",
                            ctx->props.intf);
                        GSI_ASSERT();
                    }
                    spin_lock_irqsave(&ctx->ring.slock,
                        flags);
check_again_v3_0:
                    cntr = 0;
                    empty = true;
                    rp = ctx->props.gsi_read_event_ring_rp(
                        &ctx->props, ctx->id, ee);
                    rp |= ctx->ring.rp & GSI_MSB_MASK;
                    ctx->ring.rp = rp;
                    while (ctx->ring.rp_local != rp) {
                        ++cntr;
                        if (ctx->props.exclusive &&
                            atomic_read(
                            &ctx->chan->poll_mode)) {
                            cntr = 0;
                            break;
                        }
                        gsi_process_evt_re(ctx, &notify,
                            true);
                        empty = false;
                    }
                    if (!empty)
                        gsi_ring_evt_doorbell(ctx);
                    if (cntr != 0)
                        goto check_again_v3_0;
                    spin_unlock_irqrestore(&ctx->ring.slock,
                        flags);
                }
            }
        }
    } else {
        ch = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ, ee);
        msk = gsihal_read_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK, ee);
        gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR, ee, ch & msk);
        for (i = 0; i < GSI_STTS_REG_BITS; i++) {
            if ((1 << i) & ch & msk) {
                if (i >= gsi_ctx->max_ev ||
                    i >= GSI_EVT_RING_MAX) {
                    GSIERR("invalid event %d\n", i);
                    break;
                }
                ctx = &gsi_ctx->evtr[i];
                /*
                 * Don't handle MSI interrupts, only handle
                 * IEOB IRQs
                 */
                if (ctx->props.intr == GSI_INTR_MSI)
                    continue;
                if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
                    GSIERR("Unexpected irq intf %d\n",
                        ctx->props.intf);
                    GSI_ASSERT();
                }
                spin_lock_irqsave(&ctx->ring.slock, flags);
check_again:
                cntr = 0;
                empty = true;
                rp = ctx->props.gsi_read_event_ring_rp(
                    &ctx->props, ctx->id, ee);
                rp |= ctx->ring.rp & GSI_MSB_MASK;
                ctx->ring.rp = rp;
                while (ctx->ring.rp_local != rp) {
                    ++cntr;
                    if (ctx->props.exclusive &&
                        atomic_read(
                        &ctx->chan->poll_mode)) {
                        cntr = 0;
                        break;
                    }
                    gsi_process_evt_re(ctx, &notify, true);
                    empty = false;
                }
                if (!empty)
                    gsi_ring_evt_doorbell(ctx);
                if (cntr != 0)
                    goto check_again;
                spin_unlock_irqrestore(&ctx->ring.slock, flags);
            }
        }
    }
}
static void gsi_handle_inter_ee_ch_ctrl(int ee)
{
    uint32_t ch, ch_hdl;
    int i, k, max_k;

    if (gsi_ctx->per.ver >= GSI_VER_3_0) {
        max_k = gsihal_get_bit_map_array_size();
        for (k = 0; k < max_k; k++) {
            ch = gsihal_read_reg_nk(GSI_INTER_EE_n_SRC_GSI_CH_IRQ_k, ee, k);
            gsihal_write_reg_nk(GSI_INTER_EE_n_SRC_GSI_CH_IRQ_k, ee, k, ch);
            for (i = 0; i < GSI_STTS_REG_BITS; i++) {
                if ((1 << i) & ch) {
                    ch_hdl = i + (GSI_STTS_REG_BITS * k);
                    /* not currently expected */
                    GSIERR("ch %u was inter-EE changed\n", ch_hdl);
                }
            }
        }
    } else {
        ch = gsihal_read_reg_n(GSI_INTER_EE_n_SRC_GSI_CH_IRQ, ee);
        gsihal_write_reg_n(GSI_INTER_EE_n_SRC_GSI_CH_IRQ, ee, ch);
        for (i = 0; i < GSI_STTS_REG_BITS; i++) {
            if ((1 << i) & ch) {
                /* not currently expected */
                GSIERR("ch %u was inter-EE changed\n", i);
            }
        }
    }
}

static void gsi_handle_inter_ee_ev_ctrl(int ee)
{
    uint32_t ch, evt_hdl;
    int i, k, max_k;

    if (gsi_ctx->per.ver >= GSI_VER_3_0) {
        max_k = gsihal_get_bit_map_array_size();
        for (k = 0; k < max_k; k++) {
            ch = gsihal_read_reg_nk(GSI_INTER_EE_n_SRC_EV_CH_IRQ_k, ee, k);
            gsihal_write_reg_nk(GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_k, ee, k, ch);
            for (i = 0; i < GSI_STTS_REG_BITS; i++) {
                if ((1 << i) & ch) {
                    evt_hdl = i + (GSI_STTS_REG_BITS * k);
                    /* not currently expected */
                    GSIERR("evt %u was inter-EE changed\n",
                        evt_hdl);
                }
            }
        }
    } else {
        ch = gsihal_read_reg_n(GSI_INTER_EE_n_SRC_EV_CH_IRQ, ee);
        gsihal_write_reg_n(GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR, ee, ch);
        for (i = 0; i < GSI_STTS_REG_BITS; i++) {
            if ((1 << i) & ch) {
                /* not currently expected */
                GSIERR("evt %u was inter-EE changed\n", i);
            }
        }
    }
}
static void gsi_handle_general(int ee)
{
    uint32_t val;
    struct gsi_per_notify notify;
    struct gsihal_reg_cntxt_gsi_irq_stts gsi_irq_stts;

    val = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_GSI_IRQ_STTS,
        ee, &gsi_irq_stts);
    notify.user_data = gsi_ctx->per.user_data;

    if (gsi_irq_stts.gsi_mcs_stack_ovrflow)
        notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;

    if (gsi_irq_stts.gsi_cmd_fifo_ovrflow)
        notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;

    if (gsi_irq_stts.gsi_bus_error)
        notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;

    if (gsi_irq_stts.gsi_break_point)
        notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;

    if (gsi_ctx->per.notify_cb)
        gsi_ctx->per.notify_cb(&notify);

    gsihal_write_reg_n(GSI_EE_n_CNTXT_GSI_IRQ_CLR, ee, val);
}

static void gsi_handle_irq(void)
{
    uint32_t type;
    int ee = gsi_ctx->per.ee;
    int index;
    struct gsihal_reg_ctx_type_irq ctx_type_irq;

    while (1) {
        if (!gsi_ctx->per.clk_status_cb())
            break;
        type = gsihal_read_reg_n_fields(GSI_EE_n_CNTXT_TYPE_IRQ,
            ee, &ctx_type_irq);
        if (!type)
            break;

        GSIDBG_LOW("type 0x%x\n", type);
        index = gsi_ctx->gsi_isr_cache_index;
        gsi_ctx->gsi_isr_cache[index].timestamp =
            sched_clock();
        gsi_ctx->gsi_isr_cache[index].qtimer =
            __arch_counter_get_cntvct();
        gsi_ctx->gsi_isr_cache[index].interrupt_type = type;
        gsi_ctx->gsi_isr_cache_index++;
        if (gsi_ctx->gsi_isr_cache_index == GSI_ISR_CACHE_MAX)
            gsi_ctx->gsi_isr_cache_index = 0;

        if (ctx_type_irq.ch_ctrl) {
            gsi_handle_ch_ctrl(ee);
            break;
        }

        if (ctx_type_irq.ev_ctrl) {
            gsi_handle_ev_ctrl(ee);
            break;
        }

        if (ctx_type_irq.glob_ee)
            gsi_handle_glob_ee(ee);

        if (ctx_type_irq.ieob)
            gsi_handle_ieob(ee);

        if (ctx_type_irq.inter_ee_ch_ctrl)
            gsi_handle_inter_ee_ch_ctrl(ee);

        if (ctx_type_irq.inter_ee_ev_ctrl)
            gsi_handle_inter_ee_ev_ctrl(ee);

        if (ctx_type_irq.general)
            gsi_handle_general(ee);
    }
}
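
/*
 * gsi_isr() - top-level GSI interrupt handler. When a clock-request
 * callback is registered, clocks are voted for before the HW is
 * touched. If the GSI clock is reported off, the IRQ cannot be
 * serviced; after GSI_IRQ_STORM_THR consecutive unclocked interrupts
 * the client's enable_clk_bug_on() hook is invoked to capture the
 * interrupt storm.
 */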
static irqreturn_t gsi_isr(int irq, void *ctxt)
{
    if (gsi_ctx->per.req_clk_cb) {
        bool granted = false;

        gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
        if (granted) {
            gsi_handle_irq();
            gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
        }
    } else if (!gsi_ctx->per.clk_status_cb()) {
        /* we only want to capture the gsi isr storm here */
        if (atomic_read(&gsi_ctx->num_unclock_irq) ==
            GSI_IRQ_STORM_THR)
            gsi_ctx->per.enable_clk_bug_on();
        atomic_inc(&gsi_ctx->num_unclock_irq);
        return IRQ_HANDLED;
    } else {
        atomic_set(&gsi_ctx->num_unclock_irq, 0);
        gsi_handle_irq();
    }

    return IRQ_HANDLED;
}
static uint32_t gsi_get_max_channels(enum gsi_ver ver)
{
    uint32_t max_ch = 0;
    struct gsihal_reg_hw_param hw_param;
    struct gsihal_reg_hw_param2 hw_param2;

    switch (ver) {
    case GSI_VER_ERR:
    case GSI_VER_MAX:
        GSIERR("GSI version is not supported %d\n", ver);
        WARN_ON(1);
        break;
    case GSI_VER_1_0:
        gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM,
            gsi_ctx->per.ee, &hw_param);
        max_ch = hw_param.gsi_ch_num;
        break;
    case GSI_VER_1_2:
        gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_0,
            gsi_ctx->per.ee, &hw_param);
        max_ch = hw_param.gsi_ch_num;
        break;
    default:
        gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_2,
            gsi_ctx->per.ee, &hw_param2);
        max_ch = hw_param2.gsi_num_ch_per_ee;
        break;
    }

    GSIDBG("max channels %d\n", max_ch);
    return max_ch;
}

static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
{
    uint32_t max_ev = 0;
    struct gsihal_reg_hw_param hw_param;
    struct gsihal_reg_hw_param2 hw_param2;
    struct gsihal_reg_hw_param4 hw_param4;

    switch (ver) {
    case GSI_VER_ERR:
    case GSI_VER_MAX:
        GSIERR("GSI version is not supported %d\n", ver);
        WARN_ON(1);
        break;
    case GSI_VER_1_0:
        gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM,
            gsi_ctx->per.ee, &hw_param);
        max_ev = hw_param.gsi_ev_ch_num;
        break;
    case GSI_VER_1_2:
        gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_0,
            gsi_ctx->per.ee, &hw_param);
        max_ev = hw_param.gsi_ev_ch_num;
        break;
    case GSI_VER_3_0:
        gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_4,
            gsi_ctx->per.ee, &hw_param4);
        max_ev = hw_param4.gsi_num_ev_per_ee;
        break;
    default:
        gsihal_read_reg_n_fields(GSI_EE_n_GSI_HW_PARAM_2,
            gsi_ctx->per.ee, &hw_param2);
        max_ev = hw_param2.gsi_num_ev_per_ee;
        break;
    }

    GSIDBG("max event rings %d\n", max_ev);
    return max_ev;
}
int gsi_complete_clk_grant(unsigned long dev_hdl)
{
    unsigned long flags;

    if (!gsi_ctx) {
        pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
        return -GSI_STATUS_NODEV;
    }

    if (!gsi_ctx->per_registered) {
        GSIERR("no client registered\n");
        return -GSI_STATUS_INVALID_PARAMS;
    }

    if (dev_hdl != (uintptr_t)gsi_ctx) {
        GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
            gsi_ctx);
        return -GSI_STATUS_INVALID_PARAMS;
    }

    spin_lock_irqsave(&gsi_ctx->slock, flags);
    gsi_handle_irq();
    gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
    spin_unlock_irqrestore(&gsi_ctx->slock, flags);

    return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_complete_clk_grant);

int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
    if (!gsi_ctx) {
        pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
        return -GSI_STATUS_NODEV;
    }

    gsi_ctx->base = devm_ioremap(
        gsi_ctx->dev, gsi_base_addr, gsi_size);

    if (!gsi_ctx->base) {
        GSIERR("failed to map access to GSI HW\n");
        return -GSI_STATUS_RES_ALLOC_FAILURE;
    }

    GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%x)\n",
        &gsi_base_addr,
        gsi_ctx->base,
        gsi_size);

    /* initialize HAL before accessing any register */
    gsihal_init(ver, gsi_ctx->base);

    return 0;
}
EXPORT_SYMBOL(gsi_map_base);
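
/*
 * Typical bring-up sketch (illustrative): a client may map the register
 * space with gsi_map_base() ahead of time; otherwise
 * gsi_register_device() maps it on demand from props->phys_addr and
 * props->size, as seen further down.
 */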
int gsi_unmap_base(void)
{
    if (!gsi_ctx) {
        pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
        return -GSI_STATUS_NODEV;
    }

    if (!gsi_ctx->base) {
        GSIERR("access to GSI HW has not been mapped\n");
        return -GSI_STATUS_INVALID_PARAMS;
    }

    devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
    gsi_ctx->base = NULL;

    return 0;
}
EXPORT_SYMBOL(gsi_unmap_base);
  1043. int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
  1044. {
  1045. int res;
  1046. struct gsihal_reg_gsi_status gsi_status;
  1047. struct gsihal_reg_gsi_ee_n_cntxt_gsi_irq gen_irq;
  1048. if (!gsi_ctx) {
  1049. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  1050. return -GSI_STATUS_NODEV;
  1051. }
  1052. if (!props || !dev_hdl) {
  1053. GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl);
  1054. return -GSI_STATUS_INVALID_PARAMS;
  1055. }
  1056. if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
  1057. GSIERR("bad params gsi_ver=%d\n", props->ver);
  1058. return -GSI_STATUS_INVALID_PARAMS;
  1059. }
  1060. if (!props->notify_cb) {
  1061. GSIERR("notify callback must be provided\n");
  1062. return -GSI_STATUS_INVALID_PARAMS;
  1063. }
  1064. if (props->req_clk_cb && !props->rel_clk_cb) {
  1065. GSIERR("rel callback must be provided\n");
  1066. return -GSI_STATUS_INVALID_PARAMS;
  1067. }
  1068. if (gsi_ctx->per_registered) {
  1069. GSIERR("per already registered\n");
  1070. return -GSI_STATUS_UNSUPPORTED_OP;
  1071. }
  1072. spin_lock_init(&gsi_ctx->slock);
  1073. gsi_ctx->per = *props;
  1074. if (props->intr == GSI_INTR_IRQ) {
  1075. if (!props->irq) {
  1076. GSIERR("bad irq specified %u\n", props->irq);
  1077. return -GSI_STATUS_INVALID_PARAMS;
  1078. }
		/*
		 * On a real UE, there are two separate interrupt
		 * vectors that get directed toward the GSI/IPA
		 * drivers. They are handled by gsi_isr() and
		 * (ipa_isr() or ipa3_isr()) respectively. In the
		 * emulation environment, this is not the case;
		 * instead, interrupt vectors are routed to the
		 * emulation hardware's interrupt controller, which
		 * in turn, forwards a single interrupt to the GSI/IPA
		 * driver. When the new interrupt vector is received,
		 * the driver needs to probe the interrupt
		 * controller's registers to see if one, the other, or
		 * both interrupts have occurred. Given the above, we
		 * now need to handle both situations, namely: the
		 * emulator's and the real UE.
		 */
		if (running_emulation) {
			/*
			 * New scheme involving the emulator's
			 * interrupt controller.
			 */
			res = devm_request_threaded_irq(
				gsi_ctx->dev,
				props->irq,
				/* top half handler to follow */
				emulator_hard_irq_isr,
				/* threaded bottom half handler to follow */
				emulator_soft_irq_isr,
				IRQF_SHARED,
				"emulator_intcntrlr",
				gsi_ctx);
		} else {
			/*
			 * Traditional scheme used on the real UE.
			 */
			res = devm_request_irq(gsi_ctx->dev, props->irq,
				gsi_isr,
				props->req_clk_cb ? IRQF_TRIGGER_RISING :
					IRQF_TRIGGER_HIGH,
				"gsi",
				gsi_ctx);
		}
		if (res) {
			GSIERR("failed to register isr for %u\n", props->irq);
			return -GSI_STATUS_ERROR;
		}
		GSIDBG("succeeded to register isr for %u\n", props->irq);

		res = enable_irq_wake(props->irq);
		if (res)
			GSIERR("failed to enable wake irq %u\n", props->irq);
		else
			GSIERR("GSI irq is wake enabled %u\n", props->irq);
	} else {
		GSIERR("do not support interrupt type %u\n", props->intr);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	/*
	 * If base not previously mapped via gsi_map_base(), map it
	 * now...
	 */
	if (!gsi_ctx->base) {
		res = gsi_map_base(props->phys_addr, props->size, props->ver);
		if (res)
			return res;
	}

	if (running_emulation) {
		GSIDBG("GSI SW ver register value 0x%x\n",
			gsihal_read_reg_n(GSI_EE_n_GSI_SW_VERSION, 0));
		gsi_ctx->intcntrlr_mem_size =
			props->emulator_intcntrlr_size;
		gsi_ctx->intcntrlr_base =
			devm_ioremap_nocache(
				gsi_ctx->dev,
				props->emulator_intcntrlr_addr,
				props->emulator_intcntrlr_size);
		if (!gsi_ctx->intcntrlr_base) {
			GSIERR("failed to remap emulator's interrupt controller HW\n");
			gsi_unmap_base();
			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}

		GSIDBG("Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
			&(props->emulator_intcntrlr_addr),
			gsi_ctx->intcntrlr_base,
			props->emulator_intcntrlr_size);

		gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
		gsi_ctx->intcntrlr_client_isr =
			props->emulator_intcntrlr_client_isr;
	}

	gsi_ctx->per_registered = true;
	mutex_init(&gsi_ctx->mlock);
	atomic_set(&gsi_ctx->num_chan, 0);
	atomic_set(&gsi_ctx->num_evt_ring, 0);
	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
	if (gsi_ctx->max_ch == 0) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max channels\n");
		return -GSI_STATUS_ERROR;
	}
	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
	if (gsi_ctx->max_ev == 0) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max event rings\n");
		return -GSI_STATUS_ERROR;
	}

	if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
		GSIERR("max event rings are beyond absolute maximum\n");
		return -GSI_STATUS_ERROR;
	}

	if (props->mhi_er_id_limits_valid &&
	    props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
		gsi_unmap_base();
		if (running_emulation)
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("MHI event ring start id %u is beyond max %u\n",
			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
		return -GSI_STATUS_ERROR;
	}
	gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
	/* exclude reserved mhi events */
	if (props->mhi_er_id_limits_valid)
		gsi_ctx->evt_bmap |=
			((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^
			((1 << (props->mhi_er_id_limits[0])) - 1);
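	/*
	 * Worked example of the bitmap math above (illustrative values,
	 * not from any specific target): with max_ev = 16 and
	 * mhi_er_id_limits = {10, 11}, the first assignment marks IDs
	 * 16..63 as unusable, and ((1 << 12) - 1) ^ ((1 << 10) - 1)
	 * equals 0xC00, i.e. bits 10 and 11, so the two MHI-reserved
	 * rings are never handed out by find_first_zero_bit() in
	 * gsi_alloc_evt_ring().
	 */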
	/*
	 * Enable all interrupts but GSI_BREAK_POINT.
	 * Inter-EE commands / interrupts are not supported.
	 */
	__gsi_config_type_irq(props->ee, ~0, ~0);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		__gsi_config_all_ch_irq(props->ee, ~0, ~0);
		__gsi_config_all_evt_irq(props->ee, ~0, ~0);
		__gsi_config_all_ieob_irq(props->ee, ~0, ~0);
	} else {
		__gsi_config_ch_irq(props->ee, ~0, ~0);
		__gsi_config_evt_irq(props->ee, ~0, ~0);
		__gsi_config_ieob_irq(props->ee, ~0, ~0);
	}
	__gsi_config_glob_irq(props->ee, ~0, ~0);

	/*
	 * Disable the global INT1 interrupt by default and enable it
	 * only when sending the generic command.
	 */
	__gsi_config_glob_irq(props->ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	gen_irq.gsi_mcs_stack_ovrflow = 1;
	gen_irq.gsi_cmd_fifo_ovrflow = 1;
	gen_irq.gsi_bus_error = 1;
	gen_irq.gsi_break_point = 0;
	gsihal_write_reg_n_fields(GSI_EE_n_CNTXT_GSI_IRQ_EN,
		gsi_ctx->per.ee, &gen_irq);

	gsihal_write_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee, props->intr);
	/* set GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB/MSB to 0 */
	if ((gsi_ctx->per.ver >= GSI_VER_2_0) &&
	    (props->intr != GSI_INTR_MSI)) {
		gsihal_write_reg_n(
			GSI_EE_n_CNTXT_MSI_BASE_LSB, gsi_ctx->per.ee, 0);
		gsihal_write_reg_n(
			GSI_EE_n_CNTXT_MSI_BASE_MSB, gsi_ctx->per.ee, 0);
	}

	gsihal_read_reg_n_fields(GSI_EE_n_GSI_STATUS,
		gsi_ctx->per.ee, &gsi_status);
	if (gsi_status.enabled)
		gsi_ctx->enabled = true;
	else
		GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");

	if (gsi_ctx->per.ver >= GSI_VER_1_2)
		gsihal_write_reg_n(GSI_EE_n_ERROR_LOG, gsi_ctx->per.ee, 0);

	if (running_emulation) {
		/*
		 * Set up the emulator's interrupt controller...
		 */
		res = setup_emulator_cntrlr(
			gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
		if (res != 0) {
			gsi_unmap_base();
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
			GSIERR("setup_emulator_cntrlr() failed\n");
			return res;
		}
	}

	*dev_hdl = (uintptr_t)gsi_ctx;
	gsi_ctx->gsi_isr_cache_index = 0;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_register_device);

int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	unsigned int max_usb_pkt_size = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (val->max_usb_pkt_size_valid &&
	    val->max_usb_pkt_size != 1024 &&
	    val->max_usb_pkt_size != 512 &&
	    val->max_usb_pkt_size != 64) {
		GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
			val->max_usb_pkt_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	mutex_lock(&gsi_ctx->mlock);
	if (val->mhi_base_chan_idx_valid)
		gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
			val->mhi_base_chan_idx;

	if (val->max_usb_pkt_size_valid) {
		/*
		 * The scratch field holds an encoding, not the byte count:
		 * per the logic below, 64B -> 2, 512B -> 0, 1024B -> 1.
		 */
		max_usb_pkt_size = 2;
		if (val->max_usb_pkt_size > 64)
			max_usb_pkt_size =
				(val->max_usb_pkt_size == 1024) ? 1 : 0;
		gsi_ctx->scratch.word0.s.max_usb_pkt_size = max_usb_pkt_size;
	}

	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_device_scratch);

int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	if (!force && atomic_read(&gsi_ctx->num_chan)) {
		GSIERR("cannot deregister: %u channels are still connected\n",
			atomic_read(&gsi_ctx->num_chan));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
		GSIERR("cannot deregister: %u event rings are still connected\n",
			atomic_read(&gsi_ctx->num_evt_ring));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	/* disable all interrupts */
	__gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		__gsi_config_all_ch_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_all_evt_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_all_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	} else {
		__gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
		__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	}
	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);

	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
	gsihal_destroy();
	gsi_unmap_base();
	memset(gsi_ctx, 0, sizeof(*gsi_ctx));

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_deregister_device);

static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
		uint8_t evt_id, unsigned int ee)
{
	struct gsihal_reg_ev_ch_k_cntxt_0 ev_ch_k_cntxt_0;
	struct gsihal_reg_ev_ch_k_cntxt_1 ev_ch_k_cntxt_1;
	struct gsihal_reg_ev_ch_k_cntxt_2 ev_ch_k_cntxt_2;
	struct gsihal_reg_ev_ch_k_cntxt_3 ev_ch_k_cntxt_3;
	struct gsihal_reg_ev_ch_k_cntxt_8 ev_ch_k_cntxt_8;
	struct gsihal_reg_ev_ch_k_cntxt_9 ev_ch_k_cntxt_9;
	struct gsihal_reg_ev_ch_k_cntxt_10 ev_ch_k_cntxt_10;
	struct gsihal_reg_ev_ch_k_cntxt_11 ev_ch_k_cntxt_11;
	struct gsihal_reg_ev_ch_k_cntxt_12 ev_ch_k_cntxt_12;
	struct gsihal_reg_ev_ch_k_cntxt_13 ev_ch_k_cntxt_13;

	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
		props->re_size);

	ev_ch_k_cntxt_0.chtype = props->intf;
	ev_ch_k_cntxt_0.intype = props->intr;
	ev_ch_k_cntxt_0.element_size = props->re_size;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_0,
		ee, evt_id, &ev_ch_k_cntxt_0);

	ev_ch_k_cntxt_1.r_length = props->ring_len;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_1,
		ee, evt_id, &ev_ch_k_cntxt_1);

	ev_ch_k_cntxt_2.r_base_addr_lsbs = GSI_LSB(props->ring_base_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_2,
		ee, evt_id, &ev_ch_k_cntxt_2);

	ev_ch_k_cntxt_3.r_base_addr_msbs = GSI_MSB(props->ring_base_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_3,
		ee, evt_id, &ev_ch_k_cntxt_3);

	ev_ch_k_cntxt_8.int_modt = props->int_modt;
	ev_ch_k_cntxt_8.int_modc = props->int_modc;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_8,
		ee, evt_id, &ev_ch_k_cntxt_8);

	ev_ch_k_cntxt_9.intvec = props->intvec;
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_9,
		ee, evt_id, &ev_ch_k_cntxt_9);

	ev_ch_k_cntxt_10.msi_addr_lsb = GSI_LSB(props->msi_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_10,
		ee, evt_id, &ev_ch_k_cntxt_10);

	ev_ch_k_cntxt_11.msi_addr_msb = GSI_MSB(props->msi_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_11,
		ee, evt_id, &ev_ch_k_cntxt_11);

	ev_ch_k_cntxt_12.rp_update_addr_lsb = GSI_LSB(props->rp_update_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_12,
		ee, evt_id, &ev_ch_k_cntxt_12);

	ev_ch_k_cntxt_13.rp_update_addr_msb = GSI_MSB(props->rp_update_addr);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_CNTXT_13,
		ee, evt_id, &ev_ch_k_cntxt_13);
}

static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;

	if (props->rp_update_vaddr)
		*(uint64_t *)(props->rp_update_vaddr) = ctx->rp_local;
}

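/*
 * Worked example of the geometry set up above (illustrative numbers, not
 * from any specific target): a 4 KB ring of 16 B elements has
 * len / elem_sz = 256 slots, so max_num_elem = 255. Keeping one slot
 * back is the usual ring-buffer convention for telling a full ring
 * apart from an empty one, and end = base + 256 * 16 = base + 0x1000
 * marks the wrap-around point.
 */
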
static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;
	struct gsihal_reg_gsi_ee_n_ev_ch_k_doorbell_1 db;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		ctx->ring.max_num_elem * ctx->ring.elem_sz;

	/*
	 * Write order MUST be MSB followed by LSB; the LSB write, done in
	 * gsi_ring_evt_doorbell(), is what commits the doorbell to HW.
	 */
	db.write_ptr_msb = GSI_MSB(ctx->ring.wp_local);
	gsihal_write_reg_nk_fields(GSI_EE_n_EV_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->id, &db);

	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static void gsi_prime_evt_ring_wdi(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	if (ctx->ring.base_va)
		memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		((ctx->ring.max_num_elem + 2) * ctx->ring.elem_sz);
	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

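/*
 * Note the difference from gsi_prime_evt_ring() above: the WDI variant
 * tolerates a missing ring VA and primes wp_local to
 * base + (max_num_elem + 2) * elem_sz, i.e. one element past the ring
 * end, whereas the GPI variant stops one element before it.
 */
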
static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
{
	uint64_t ra;

	if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
	     props->ring_len % 4) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_8B &&
	     props->ring_len % 8) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
	     props->ring_len % 16) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_32B &&
	     props->ring_len % 32)) {
		GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
			props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!gsihal_check_ring_length_valid(props->ring_len, props->re_size))
		return -GSI_STATUS_INVALID_PARAMS;

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
			props->ring_base_addr,
			roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
	    (!props->evchid_valid ||
	     props->evchid > gsi_ctx->per.mhi_er_id_limits[1] ||
	     props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) {
		GSIERR("MHI requires evchid valid=%d val=%u\n",
			props->evchid_valid, props->evchid);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
	    props->evchid_valid) {
		GSIERR("protocol %u cannot specify evchid\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}

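/*
 * Worked example of the alignment check in gsi_validate_evt_ring_props()
 * (illustrative numbers): for ring_len = 0x1800,
 * roundup_pow_of_two(0x1800) = 0x2000, so the do_div() round trip only
 * reproduces ring_base_addr when the base is 8 KB aligned; a base of
 * 0x1000 would be rejected while 0x4000 would pass.
 */
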
/**
 * gsi_cleanup_xfer_user_data: cleanup the user data array using the callback
 * passed by the IPA driver. This must be done in GSI since only GSI knows
 * which TREs are in use. However, IPA owns the actual cleanup, so IPA
 * passes a callback and GSI invokes it with GSI-side params.
 *
 * @chan_hdl: hdl of the gsi channel whose user data array is to be cleaned
 * @cleanup_cb: callback used to clean the user data array. takes 2 inputs
 *	@chan_user_data: ipa_sys_context of the gsi_channel
 *	@xfer_user_data: user data array element (rx_pkt wrapper)
 *
 * Returns: 0 on success, negative on failure
 */
static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
		void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
{
	struct gsi_chan_ctx *ctx;
	uint64_t i;
	uint16_t rp_idx;

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* for coalescing, traverse the whole array */
	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		size_t user_data_size =
			ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
		for (i = 0; i < user_data_size; i++) {
			if (ctx->user_data[i].valid)
				cleanup_cb(ctx->props.chan_user_data,
					ctx->user_data[i].p);
		}
	} else {
		/* for non-coalescing, clean between RP and WP */
		while (ctx->ring.rp_local != ctx->ring.wp_local) {
			rp_idx = gsi_find_idx_from_addr(&ctx->ring,
				ctx->ring.rp_local);
			WARN_ON(!ctx->user_data[rp_idx].valid);
			cleanup_cb(ctx->props.chan_user_data,
				ctx->user_data[rp_idx].p);
			gsi_incr_ring_rp(&ctx->ring);
		}
	}

	return 0;
}

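/*
 * Minimal sketch of a cleanup_cb a caller might pass in (hypothetical
 * names; the real IPA callback frees its rx_pkt wrappers):
 *
 *	static void my_cleanup_cb(void *chan_user_data, void *xfer_user_data)
 *	{
 *		struct my_sys_ctx *sys = chan_user_data;
 *		struct my_rx_pkt *pkt = xfer_user_data;
 *
 *		dev_kfree_skb_any(pkt->skb);
 *		kmem_cache_free(sys->pkt_cache, pkt);
 *	}
 *
 *	gsi_cleanup_xfer_user_data(chan_hdl, my_cleanup_cb);
 */
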
/**
 * gsi_read_event_ring_rp_ddr - returns the RP value of the event ring,
 * read from the rp_update shadow location in DDR.
 *
 * @props: Props structure of the event channel
 * @id: Event channel index
 * @ee: EE
 *
 * @Return the event ring read pointer value
 */
static inline uint64_t gsi_read_event_ring_rp_ddr(struct gsi_evt_ring_props *props,
	uint8_t id, int ee)
{
	return readl_relaxed(props->rp_update_vaddr);
}

/**
 * gsi_read_event_ring_rp_reg - returns the RP value of the event ring,
 * read from the ring context register.
 *
 * @props: Props structure of the event channel
 * @id: Event channel index
 * @ee: EE
 *
 * @Return the event ring read pointer value
 */
static inline uint64_t gsi_read_event_ring_rp_reg(struct gsi_evt_ring_props *props,
	uint8_t id, int ee)
{
	return gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4, ee, id);
}

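/*
 * gsi_alloc_evt_ring() below selects between these two helpers: when
 * props->rp_update_addr is non-zero, the DDR variant is used (HW mirrors
 * the RP to that address, programmed via EV_CH_k_CNTXT_12/13); otherwise
 * the RP is read back from the EV_CH_k_CNTXT_4 register.
 */
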
int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	unsigned long evt_id;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	struct gsi_evt_ctx *ctx;
	int res;
	int ee;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n",
			props, dev_hdl, evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_evt_ring_props(props)) {
		GSIERR("invalid params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
			sizeof(unsigned long) * BITS_PER_BYTE);
		if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
			GSIERR("failed to alloc event ID\n");
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		set_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		evt_id = props->evchid;
	}
	GSIDBG("Using %lu as virt evt id\n", evt_id);

	if (props->rp_update_addr != 0) {
		GSIDBG("Using DDR to read event RP for virt evt id: %lu\n",
			evt_id);
		props->gsi_read_event_ring_rp = gsi_read_event_ring_rp_ddr;
	} else {
		GSIDBG("Using CONTEXT reg to read event RP for virt evt id: %lu\n",
			evt_id);
		props->gsi_read_event_ring_rp = gsi_read_event_ring_rp_reg;
	}

	ctx = &gsi_ctx->evtr[evt_id];
	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->chan_ref_cnt, 0);
	ctx->props = *props;

	mutex_lock(&gsi_ctx->mlock);
	ee = gsi_ctx->per.ee;
	ev_ch_cmd.opcode = op;
	ev_ch_cmd.chid = evt_id;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD, ee, &ev_ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_id);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu allocation failed state=%u\n",
			evt_id, ctx->state);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_evt_ring(props, &ctx->ring);

	ctx->id = evt_id;
	*evt_ring_hdl = evt_id;
	atomic_inc(&gsi_ctx->num_evt_ring);
	if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	else if (props->intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee,
			gsihal_get_ch_reg_idx(evt_id),
			gsihal_get_ch_reg_mask(evt_id));
	} else {
		gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR, ee,
			1 << evt_id);
	}

	/* enable ieob interrupts for GPI, enable MSI interrupts */
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
		    (props->intr != GSI_INTR_MSI))
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(evt_id),
				gsihal_get_ch_reg_mask(evt_id), 0);
		else
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(evt_id),
				gsihal_get_ch_reg_mask(evt_id), ~0);
	} else {
		if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
		    (props->intr != GSI_INTR_MSI))
			__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
		else
			__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
	}
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_evt_ring);

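/*
 * Minimal usage sketch for gsi_alloc_evt_ring() (hypothetical values; a
 * real client sizes and DMA-maps the ring to match its channel):
 *
 *	struct gsi_evt_ring_props eprops;
 *	unsigned long evt_hdl;
 *
 *	memset(&eprops, 0, sizeof(eprops));
 *	eprops.intf = GSI_EVT_CHTYPE_GPI_EV;
 *	eprops.intr = GSI_INTR_IRQ;
 *	eprops.re_size = GSI_EVT_RING_RE_SIZE_16B;
 *	eprops.ring_len = 4096;
 *	eprops.ring_base_addr = dma_addr;	// from dma_alloc_coherent()
 *	eprops.ring_base_vaddr = vaddr;		// required for GPI
 *	eprops.int_modt = 32;
 *	eprops.int_modc = 1;
 *	eprops.err_cb = my_evt_err_cb;		// mandatory
 *	if (gsi_alloc_evt_ring(&eprops, dev_hdl, &evt_hdl))
 *		goto fail;
 */
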
static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, evt_ring_hdl, val.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, evt_ring_hdl, val.data.word2);
}

int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_evt_ring_scratch(evt_ring_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_evt_ring_scratch);

int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev ||
	    evt_ring_hdl >= GSI_EVT_RING_MAX) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (atomic_read(&ctx->chan_ref_cnt)) {
		GSIERR("%d channels still using this event ring\n",
			atomic_read(&ctx->chan_ref_cnt));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	ev_ch_cmd.chid = evt_ring_hdl;
	ev_ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD,
		gsi_ctx->per.ee, &ev_ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
			ctx->state);
		/*
		 * The command completed but HW did not move the ring to
		 * NOT_ALLOCATED, which is an unexpected hardware state.
		 */
		GSI_ASSERT();
	}
	mutex_unlock(&gsi_ctx->mlock);

	if (!ctx->props.evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	}

	atomic_dec(&gsi_ctx->num_evt_ring);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_evt_ring);

int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
			db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr + gsihal_get_reg_nk_ofst(
		GSI_EE_n_EV_CH_k_DOORBELL_0, gsi_ctx->per.ee, evt_ring_hdl);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr + gsihal_get_reg_nk_ofst(
		GSI_EE_n_EV_CH_k_DOORBELL_1, gsi_ctx->per.ee, evt_ring_hdl);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);

int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;
	gsi_ring_evt_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_evt_ring_db);

int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_STARTED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;

	/* write MSB first; the LSB write in gsi_ring_chan_doorbell() rings it */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->props.ch_id, GSI_MSB(ctx->ring.wp_local));
	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_ch_ring_db);

int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	ev_ch_cmd.chid = evt_ring_hdl;
	ev_ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_EV_CH_CMD,
		gsi_ctx->per.ee, &ev_ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
			ctx->state);
		/*
		 * After a reset the ring is expected to be back in the
		 * ALLOCATED state; anything else indicates hardware
		 * instability.
		 */
		GSI_ASSERT();
	}
	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
	gsi_init_evt_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);

	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	if (ctx->props.intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_evt_ring);

int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_evt_ring_cfg);

int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_evt_ring_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.exclusive != props->exclusive) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	mutex_unlock(&ctx->mlock);

	return gsi_reset_evt_ring(evt_ring_hdl);
}
EXPORT_SYMBOL(gsi_set_evt_ring_cfg);

static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props,
		unsigned int ee)
{
	struct gsihal_reg_gsi_ee_n_gsi_ch_k_qos ch_k_qos;

	ch_k_qos.wrr_weight = props->low_weight;
	ch_k_qos.max_prefetch = props->max_prefetch;
	ch_k_qos.use_db_eng = props->use_db_eng;

	if (gsi_ctx->per.ver >= GSI_VER_2_0) {
		if (gsi_ctx->per.ver < GSI_VER_2_5) {
			ch_k_qos.use_escape_buf_only = props->prefetch_mode;
		} else {
			ch_k_qos.prefetch_mode = props->prefetch_mode;
			ch_k_qos.empty_lvl_thrshold =
				props->empty_lvl_threshold;
			if (gsi_ctx->per.ver >= GSI_VER_2_9)
				ch_k_qos.db_in_bytes = props->db_in_bytes;
		}
	}

	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_QOS,
		ee, props->ch_id, &ch_k_qos);
}

static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
		uint8_t erindex)
{
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	struct gsihal_reg_ch_k_cntxt_1 ch_k_cntxt_1;

	switch (props->prot) {
	case GSI_CHAN_PROT_MHI:
	case GSI_CHAN_PROT_XHCI:
	case GSI_CHAN_PROT_GPI:
	case GSI_CHAN_PROT_XDCI:
	case GSI_CHAN_PROT_WDI2:
	case GSI_CHAN_PROT_WDI3:
	case GSI_CHAN_PROT_GCI:
	case GSI_CHAN_PROT_MHIP:
		ch_k_cntxt_0.chtype_protocol_msb = 0;
		break;
	case GSI_CHAN_PROT_AQC:
	case GSI_CHAN_PROT_11AD:
	case GSI_CHAN_PROT_RTK:
	case GSI_CHAN_PROT_QDSS:
		ch_k_cntxt_0.chtype_protocol_msb = 1;
		break;
	default:
		GSIERR("Unsupported protocol %d\n", props->prot);
		WARN_ON(1);
		return;
	}

	ch_k_cntxt_0.chtype_protocol = props->prot;
	ch_k_cntxt_0.chtype_dir = props->dir;
	if (gsi_ctx->per.ver >= GSI_VER_3_0)
		ch_k_cntxt_1.erindex = erindex;
	else
		ch_k_cntxt_0.erindex = erindex;
	ch_k_cntxt_0.element_size = props->re_size;
	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
		ee, props->ch_id, &ch_k_cntxt_0);

	ch_k_cntxt_1.r_length = props->ring_len;
	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_1,
		ee, props->ch_id, &ch_k_cntxt_1);

	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		ee, props->ch_id, GSI_LSB(props->ring_base_addr));
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		ee, props->ch_id, GSI_MSB(props->ring_base_addr));

	gsi_program_chan_ctx_qos(props, ee);
}

static void gsi_init_chan_ring(struct gsi_chan_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
}

static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
	uint64_t ra;
	uint64_t last;

	if (props->ch_id >= gsi_ctx->max_ch) {
		GSIERR("ch_id %u invalid\n", props->ch_id);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
	     props->ring_len % 4) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_8B &&
	     props->ring_len % 8) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_16B &&
	     props->ring_len % 16) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_32B &&
	     props->ring_len % 32) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_64B &&
	     props->ring_len % 64)) {
		GSIERR("bad params ring_len %u not a multiple of re size %u\n",
			props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!gsihal_check_ring_length_valid(props->ring_len, props->re_size))
		return -GSI_STATUS_INVALID_PARAMS;

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
			props->ring_base_addr,
			roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	last = props->ring_base_addr + props->ring_len - props->re_size;

	/* MSB should stay same within the ring */
	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
	    (last & 0xFFFFFFFF00000000ULL)) {
		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
			props->ring_base_addr,
			props->ring_len);
		return -GSI_STATUS_INVALID_PARAMS;
	}
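	/*
	 * Worked example of the MSB check (illustrative numbers): with
	 * ring_base_addr = 0xFFFFF000, ring_len = 0x2000 and a 16 B
	 * re_size, last = 0x100000FF0; the upper 32 bits differ (0x0 vs
	 * 0x1), so the ring would straddle a 4 GB boundary and is
	 * rejected, since only a single base-address MSB (CNTXT_3) is
	 * programmed per ring.
	 */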
	if (props->prot == GSI_CHAN_PROT_GPI &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->prot);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
		GSIERR("invalid channel low weight %u\n", props->low_weight);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
		GSIERR("xfer callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}

int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
	uint8_t erindex;
	struct gsi_user_data *user_data;
	size_t user_data_size;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n",
			props, dev_hdl, chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_channel_props(props)) {
		GSIERR("bad params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->evt_ring_hdl != ~0) {
		if (props->evt_ring_hdl >= gsi_ctx->max_ev) {
			GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl);
			return -GSI_STATUS_INVALID_PARAMS;
		}

		if (atomic_read(
			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
		    gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
		    gsi_ctx->evtr[props->evt_ring_hdl].chan->props.prot !=
		    GSI_CHAN_PROT_GCI) {
			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
				props->evt_ring_hdl, chan_hdl);
			return -GSI_STATUS_UNSUPPORTED_OP;
		}
	}

	ctx = &gsi_ctx->chan[props->ch_id];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", props->ch_id);
		return -GSI_STATUS_NODEV;
	}
	memset(ctx, 0, sizeof(*ctx));
	/* the user_data array is not required for IPA-offloaded WDI channels */
	if (props->prot != GSI_CHAN_PROT_WDI2 &&
	    props->prot != GSI_CHAN_PROT_WDI3)
		user_data_size = props->ring_len / props->re_size;
	else
		user_data_size = props->re_size;

	/*
	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
	 * user_data needs to be large enough to accommodate those.
	 * TODO: increase user data size if GSI_VEID_MAX is not enough
	 */
	if (props->prot == GSI_CHAN_PROT_GCI)
		user_data_size += GSI_VEID_MAX;
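	/*
	 * Sizing example (illustrative numbers): a GPI ring with
	 * ring_len = 4096 and re_size = 16 gets 256 user_data slots, one
	 * per TRE; a GCI channel with the same ring gets 256 + GSI_VEID_MAX
	 * slots, matching the extended range that
	 * gsi_cleanup_xfer_user_data() traverses for GCI.
	 */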
	user_data = devm_kzalloc(gsi_ctx->dev,
		user_data_size * sizeof(*user_data),
		GFP_KERNEL);
	if (user_data == NULL) {
		GSIERR("context not allocated\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
	ctx->props = *props;

	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;

		mutex_lock(&gsi_ctx->mlock);
		ee = gsi_ctx->per.ee;
		gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
		ch_cmd.chid = props->ch_id;
		ch_cmd.opcode = op;
		gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD, ee, &ch_cmd);
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%u timed out\n", props->ch_id);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
			GSIERR("chan_hdl=%u allocation failed state=%d\n",
				props->ch_id, ctx->state);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		ctx->state = GSI_CHAN_STATE_ALLOCATED;
		mutex_unlock(&gsi_ctx->mlock);
	}

	erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
		GSI_NO_EVT_ERINDEX;
	if (erindex != GSI_NO_EVT_ERINDEX && erindex >= GSI_EVT_RING_MAX) {
		GSIERR("invalid erindex %u\n", erindex);
		devm_kfree(gsi_ctx->dev, user_data);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (erindex < GSI_EVT_RING_MAX) {
		ctx->evtr = &gsi_ctx->evtr[erindex];
		if (props->prot != GSI_CHAN_PROT_GCI)
			atomic_inc(&ctx->evtr->chan_ref_cnt);
		if (props->prot != GSI_CHAN_PROT_GCI &&
		    ctx->evtr->props.exclusive &&
		    atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
			ctx->evtr->chan = ctx;
	}

	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_chan_ring(props, &ctx->ring);
	if (!props->max_re_expected)
		ctx->props.max_re_expected = ctx->ring.max_num_elem;
	ctx->user_data = user_data;
	*chan_hdl = props->ch_id;
	ctx->allocated = true;
	ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
	atomic_inc(&gsi_ctx->num_chan);

	if (props->prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = props->ch_id;
		gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_channel);

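/*
 * Minimal usage sketch for gsi_alloc_channel() (hypothetical values,
 * paired with the event ring from the gsi_alloc_evt_ring() sketch
 * above):
 *
 *	struct gsi_chan_props cprops;
 *	unsigned long chan_hdl;
 *
 *	memset(&cprops, 0, sizeof(cprops));
 *	cprops.prot = GSI_CHAN_PROT_GPI;
 *	cprops.dir = GSI_CHAN_DIR_FROM_GSI;
 *	cprops.ch_id = my_ch_id;
 *	cprops.evt_ring_hdl = evt_hdl;		// or ~0 for no event ring
 *	cprops.re_size = GSI_CHAN_RE_SIZE_16B;
 *	cprops.ring_len = 4096;
 *	cprops.ring_base_addr = ch_dma_addr;
 *	cprops.ring_base_vaddr = ch_vaddr;	// required for GPI
 *	cprops.low_weight = 1;
 *	cprops.xfer_cb = my_xfer_cb;		// required for GPI
 *	cprops.err_cb = my_chan_err_cb;		// mandatory
 *	if (gsi_alloc_channel(&cprops, dev_hdl, &chan_hdl))
 *		goto fail;
 */
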
static int gsi_alloc_ap_channel(unsigned int chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", chan_hdl);
		return -GSI_STATUS_NODEV;
	}

	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);

	mutex_lock(&gsi_ctx->mlock);
	ee = gsi_ctx->per.ee;
	gsi_ctx->ch_dbg[chan_hdl].ch_allocate++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD, ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%u timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%u allocation failed state=%d\n",
			chan_hdl, ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}

static void __gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl, val.data.word2);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word3);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, val.data.word4);
}

static void __gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
}

int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
		union __packed gsi_wdi_channel_scratch3_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.wdi.endp_metadatareg_offset =
		val.wdi.endp_metadatareg_offset;
	ctx->scratch.wdi.qmap_id = val.wdi.qmap_id;
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);

int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi2_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.wdi2_new.endp_metadatareg_offset =
		val.wdi.endp_metadatareg_offset;
	ctx->scratch.wdi2_new.qmap_id = val.wdi.qmap_id;
	/* preserve the currently cached RI moderation threshold */
	val.wdi.update_ri_moderation_threshold =
		ctx->scratch.wdi2_new.update_ri_moderation_threshold;
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, val.data.word1);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch2_reg);

static void __gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *val)
{
	val->data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word2 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word3 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	val->data.word4 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
}

static void __gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg *val)
{
	val->data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
}

int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch);

int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch.data.word3 = val.data.word1;
	__gsi_write_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_wdi3_channel_scratch2_reg);

int gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	__gsi_read_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_read_channel_scratch);

int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg *val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	__gsi_read_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_read_wdi3_channel_scratch2_reg);

int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
		struct __packed gsi_mhi_channel_scratch mscr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
	    gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	mutex_lock(&ctx->mlock);
	ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_update_mhi_channel_scratch);

int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
			db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_DOORBELL_0,
			gsi_ctx->per.ee, chan_hdl);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_DOORBELL_1,
			gsi_ctx->per.ee, chan_hdl);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_db_addr);
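/*
 * The addresses returned above are physical register addresses, so a
 * client typically hands them to whatever agent rings the doorbell
 * directly. A minimal caller sketch, assuming GSI_STATUS_SUCCESS is 0
 * (peer_cfg is a hypothetical caller-side structure):
 *
 *	uint32_t db_lsb, db_msb;
 *
 *	if (!gsi_query_channel_db_addr(chan_hdl, &db_lsb, &db_msb))
 *		peer_cfg.wp_db_addr = db_lsb;
 */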
int gsi_pending_irq_type(void)
{
	int ee = gsi_ctx->per.ee;

	return gsihal_read_reg_n(GSI_EE_n_CNTXT_TYPE_IRQ, ee);
}
EXPORT_SYMBOL(gsi_pending_irq_type);
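/*
 * Channel start sequence (summary of the code below): under the device
 * mutex, post the GSI_CH_START command, wait for the state-change
 * completion, then program the write-pointer doorbell MSB. A hedged
 * caller-side sketch, assuming the channel has already been brought to
 * ALLOCATED state via gsi_alloc_channel() (the error label is a
 * hypothetical caller path):
 *
 *	ret = gsi_start_channel(chan_hdl);
 *	if (ret != GSI_STATUS_SUCCESS)
 *		goto teardown;
 */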
int gsi_start_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_START;
	uint32_t val;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
	    ctx->state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsihal_read_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee);
	if (ctx->evtr &&
	    ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
	    val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);

	GSIDBG("GSI Channel Start, waiting for completion\n");
	gsi_channel_state_change_wait(chan_hdl, ctx,
		GSI_START_CMD_TIMEOUT_MS, op);

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
	    ctx->state != GSI_CHAN_STATE_FLOW_CONTROL) {
		/* Hardware returned an unexpected channel state; nothing
		 * sensible can be done here, so assert.
		 */
		GSIERR("chan=%lu timed out, unexpected state=%u\n",
			chan_hdl, ctx->state);
		gsi_dump_ch_info(chan_hdl);
		GSI_ASSERT();
	}

	GSIDBG("GSI Channel=%lu Start success\n", chan_hdl);

	/* write order MUST be MSB followed by LSB: only the MSB is
	 * programmed here; the LSB is written when the channel doorbell
	 * is rung, which commits the full 64-bit write pointer.
	 */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_DOORBELL_1,
		gsi_ctx->per.ee, ctx->props.ch_id, GSI_MSB(ctx->ring.wp_local));

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_channel);
void gsi_dump_ch_info(unsigned long chan_hdl)
{
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIDBG("invalid chan id %lu\n", chan_hdl);
		return;
	}

	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX0 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX1 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX2 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX3 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX4 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX5 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX6 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu CTX7 0x%x\n", chan_hdl, val);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_8,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu CTX8 0x%x\n", chan_hdl, val);
	}
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu REFRP 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu REFWP 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_QOS,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu QOS 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR0 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR1 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR2 0x%x\n", chan_hdl, val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIERR("CH%2lu SCR3 0x%x\n", chan_hdl, val);
	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_4,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR4 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_5,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR5 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_6,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR6 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_7,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR7 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_8,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR8 0x%x\n", chan_hdl, val);
		val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_9,
			gsi_ctx->per.ee, chan_hdl);
		GSIERR("CH%2lu SCR9 0x%x\n", chan_hdl, val);
	}
}
EXPORT_SYMBOL(gsi_dump_ch_info);
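/*
 * gsi_stop_channel() may legitimately return -GSI_STATUS_AGAIN when the
 * hardware reports STOP_IN_PROC. A minimal caller retry sketch
 * (GSI_STOP_MAX_RETRY is a hypothetical caller-side bound, not part of
 * this API):
 *
 *	for (i = 0; i < GSI_STOP_MAX_RETRY; i++) {
 *		ret = gsi_stop_channel(chan_hdl);
 *		if (ret != -GSI_STATUS_AGAIN)
 *			break;
 *		usleep_range(1000, 2000);
 *	}
 */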
int gsi_stop_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
	int res;
	uint32_t val;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
	    ctx->state != GSI_CHAN_STATE_ERROR) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsihal_read_reg_n(GSI_EE_n_CNTXT_INTSET, gsi_ctx->per.ee);
	if (ctx->evtr &&
	    ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
	    val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);

	GSIDBG("GSI Channel Stop, waiting for completion: 0x%x\n", val);
	gsi_channel_state_change_wait(chan_hdl, ctx,
		GSI_STOP_CMD_TIMEOUT_MS, op);

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		gsi_dump_ch_info(chan_hdl);
		res = -GSI_STATUS_BAD_STATE;
		BUG();
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_channel);
int gsi_stop_db_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl,
		msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
	    ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_db_channel);
int gsi_reset_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;
	bool reset_done = false;
	uint32_t retry_cnt = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/*
	 * In the WDI3 case, if SAP is enabled but no client is connected,
	 * the channel remains in ALLOCATED state. When SAP is later
	 * disabled, gsi_reset_channel() is still called, so a reset from
	 * ALLOCATED must be allowed as well.
	 */
	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
	    ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);

reset:
	reinit_completion(&ctx->compl);
	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
	ch_cmd.chid = chan_hdl;
	ch_cmd.opcode = op;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
		gsi_ctx->per.ee, &ch_cmd);
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

revrfy_chnlstate:
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
			ctx->state);
		/* The GSI register state and the channel context state may
		 * not be in sync yet; wait for up to 1 ms and re-verify.
		 */
		retry_cnt++;
		if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
			usleep_range(GSI_RESET_WA_MIN_SLEEP,
				GSI_RESET_WA_MAX_SLEEP);
			goto revrfy_chnlstate;
		}
		/* Hardware is still reporting an unexpected state; treat
		 * this as fatal.
		 */
		GSI_ASSERT();
	}

	/* Hardware issue fixed from GSI 2.0 onwards; the WA is not needed */
	if (gsi_ctx->per.ver >= GSI_VER_2_0)
		reset_done = true;

	/* workaround: reset GSI producers again */
	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
		reset_done = true;
		goto reset;
	}

	if (ctx->props.cleanup_cb)
		gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);

	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_channel);
int gsi_dealloc_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
	int res;
	struct gsihal_reg_ee_n_gsi_ch_cmd ch_cmd;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* Channel deallocation is not supported on GSI_VER_2_2 */
	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		mutex_lock(&gsi_ctx->mlock);
		reinit_completion(&ctx->compl);

		gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
		ch_cmd.chid = chan_hdl;
		ch_cmd.opcode = op;
		gsihal_write_reg_n_fields(GSI_EE_n_GSI_CH_CMD,
			gsi_ctx->per.ee, &ch_cmd);
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
			GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
				ctx->state);
			/* Hardware returned incorrect value */
			GSI_ASSERT();
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		GSIDBG("In GSI_VER_2_2 channel deallocation not supported\n");
		ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED;
		GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl,
			ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
	}
	devm_kfree(gsi_ctx->dev, ctx->user_data);
	ctx->allocated = false;
	if (ctx->evtr && (ctx->props.prot != GSI_CHAN_PROT_GCI))
		atomic_dec(&ctx->evtr->chan_ref_cnt);
	atomic_dec(&gsi_ctx->num_chan);

	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
		gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_channel);
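/*
 * Datapath occupancy bookkeeping: gsi_update_ch_dp_stats() below buckets
 * the number of used ring elements against max_re_expected. For example,
 * with max_re_expected = 300, used <= 100 counts as ch_below_lo,
 * 101..200 as ch_below_hi, and anything above as ch_above_hi; an empty
 * ring additionally tracks the longest observed empty interval in
 * milliseconds.
 */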
void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
{
	unsigned long now = jiffies_to_msecs(jiffies);
	unsigned long elapsed;

	if (used == 0) {
		elapsed = now - ctx->stats.dp.last_timestamp;
		if (ctx->stats.dp.empty_time < elapsed)
			ctx->stats.dp.empty_time = elapsed;
	}

	if (used <= ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_lo;
	else if (used <= 2 * ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_hi;
	else
		++ctx->stats.dp.ch_above_hi;
	ctx->stats.dp.last_timestamp = now;
}
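/*
 * Free-element accounting below handles write-pointer wraparound. Worked
 * example on a ring of 256 elements (max_num_elem = 255): if the read
 * index is 250 and the local write index is 10, used = 256 - (250 - 10)
 * = 16, so 255 - 16 = 239 elements are free. One slot is always kept
 * unused so that a full ring remains distinguishable from an empty one.
 */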
static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
	uint16_t *num_free_re)
{
	uint16_t start;
	uint16_t end;
	uint64_t rp;
	int ee = gsi_ctx->per.ee;
	uint16_t used;

	WARN_ON(ctx->props.prot != GSI_CHAN_PROT_GPI);

	if (!ctx->evtr) {
		rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
			ee, ctx->props.ch_id);
		rp |= ctx->ring.rp & GSI_MSB_MASK;
		ctx->ring.rp = rp;
	} else {
		rp = ctx->ring.rp_local;
	}

	start = gsi_find_idx_from_addr(&ctx->ring, rp);
	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	if (end >= start)
		used = end - start;
	else
		used = ctx->ring.max_num_elem + 1 - (start - end);

	*num_free_re = ctx->ring.max_num_elem - used;
}
int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !info) {
		GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->evtr) {
		slock = &ctx->evtr->ring.slock;
		info->evt_valid = true;
	} else {
		slock = &ctx->ring.slock;
		info->evt_valid = false;
	}

	spin_lock_irqsave(slock, flags);

	ee = gsi_ctx->per.ee;
	rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		ee, ctx->props.ch_id);
	rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		ee, ctx->props.ch_id)) << 32;
	ctx->ring.rp = rp;
	info->rp = rp;

	wp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		ee, ctx->props.ch_id);
	wp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		ee, ctx->props.ch_id)) << 32;
	ctx->ring.wp = wp;
	info->wp = wp;

	if (info->evt_valid) {
		rp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4,
			ee, ctx->evtr->id);
		rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_5,
			ee, ctx->evtr->id)) << 32;
		info->evt_rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_6,
			ee, ctx->evtr->id);
		wp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_7,
			ee, ctx->evtr->id)) << 32;
		info->evt_wp = wp;
	}

	spin_unlock_irqrestore(slock, flags);

	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
		chan_hdl, info->rp, info->wp,
		info->evt_valid, info->evt_rp, info->evt_wp);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_info);
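/*
 * Emptiness below is direction-dependent: a FROM_GSI (producer) channel
 * is considered drained when the local read pointer has caught up with
 * the hardware read pointer, while a TO_GSI (consumer) channel is
 * drained when the hardware read pointer has caught up with the write
 * pointer.
 */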
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	uint64_t rp_local;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
		GSIERR("bad params chan_hdl=%lu is_empty=%pK\n",
			chan_hdl, is_empty);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr) {
		rp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4,
			ee, ctx->evtr->id);
		rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
		ctx->evtr->ring.rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_6,
			ee, ctx->evtr->id);
		wp |= ctx->evtr->ring.wp & GSI_MSB_MASK;
		ctx->evtr->ring.wp = wp;

		rp_local = ctx->evtr->ring.rp_local;
	} else {
		rp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
			ee, ctx->props.ch_id);
		rp |= ctx->ring.rp & GSI_MSB_MASK;
		ctx->ring.rp = rp;

		wp = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
			ee, ctx->props.ch_id);
		wp |= ctx->ring.wp & GSI_MSB_MASK;
		ctx->ring.wp = wp;

		rp_local = ctx->ring.rp_local;
	}

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		*is_empty = (rp_local == rp);
	else
		*is_empty = (wp == rp);

	spin_unlock_irqrestore(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr)
		GSIDBG("ch=%lu ev=%d RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, ctx->evtr->id, rp, wp, rp_local);
	else
		GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, rp, wp, rp_local);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_is_channel_empty);
int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
{
	int i;
	int end;

	if (!ctx->user_data[idx].valid) {
		ctx->user_data[idx].valid = true;
		return idx;
	}

	/*
	 * At this point we need to find an "escape buffer" for the cookie,
	 * as the user data in this slot is still in use. This happens when
	 * the TRE at idx has not completed yet and is being reused by a
	 * new TRE.
	 */
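	/*
	 * Example: on a ring with max_num_elem = 511, a new TRE landing on
	 * idx 5 while the previous completion for idx 5 is still
	 * outstanding takes its cookie from the escape area at indices
	 * 512..512 + GSI_VEID_MAX - 1, so the in-flight user data is not
	 * overwritten.
	 */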
	ctx->stats.userdata_in_use++;
	end = ctx->ring.max_num_elem + 1;
	for (i = 0; i < GSI_VEID_MAX; i++) {
		if (!ctx->user_data[end + i].valid) {
			ctx->user_data[end + i].valid = true;
			return end + i;
		}
	}

	/* Go over the original user data when the escape buffer is full (costly) */
	GSIDBG("escape buffer is full\n");
	for (i = 0; i < end; i++) {
		if (!ctx->user_data[i].valid) {
			ctx->user_data[i].valid = true;
			return i;
		}
	}

	/* Everything is full (possibly a stall) */
	GSIERR("both the userdata array and the escape buffer are full\n");
	BUG();
	return 0xFFFF;
}
int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
	struct gsi_xfer_elem *xfer)
{
	struct gsi_gci_tre gci_tre;
	struct gsi_gci_tre *tre_gci_ptr;
	uint16_t idx;

	memset(&gci_tre, 0, sizeof(gci_tre));
	if (xfer->addr & 0xFFFFFF0000000000) {
		GSIERR("chan_hdl=%u addr too large=%llx\n",
			ctx->props.ch_id, xfer->addr);
		return -EINVAL;
	}

	if (xfer->type != GSI_XFER_ELEM_DATA) {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_gci_ptr = (struct gsi_gci_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	gci_tre.buffer_ptr = xfer->addr;
	gci_tre.buf_len = xfer->len;
	gci_tre.re_type = GSI_RE_COAL;
	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
		return -EPERM;

	/* write the TRE to ring */
	*tre_gci_ptr = gci_tre;
	ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;

	return 0;
}
int __gsi_populate_tre(struct gsi_chan_ctx *ctx,
	struct gsi_xfer_elem *xfer)
{
	struct gsi_tre tre;
	struct gsi_tre *tre_ptr;
	uint16_t idx;

	memset(&tre, 0, sizeof(tre));
	tre.buffer_ptr = xfer->addr;
	tre.buf_len = xfer->len;
	if (xfer->type == GSI_XFER_ELEM_DATA) {
		tre.re_type = GSI_RE_XFER;
	} else if (xfer->type == GSI_XFER_ELEM_IMME_CMD) {
		tre.re_type = GSI_RE_IMMD_CMD;
	} else if (xfer->type == GSI_XFER_ELEM_NOP) {
		tre.re_type = GSI_RE_NOP;
	} else {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	tre.bei = (xfer->flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
	tre.ieot = (xfer->flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
	tre.ieob = (xfer->flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
	tre.chain = (xfer->flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	/* write the TRE to ring */
	*tre_ptr = tre;
	ctx->user_data[idx].valid = true;
	ctx->user_data[idx].p = xfer->xfer_user_data;

	return 0;
}
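/*
 * A hedged usage sketch for gsi_queue_xfer() below, assuming a started
 * GPI channel and a DMA-mapped buffer (dma_addr, len and skb are
 * hypothetical caller-side names):
 *
 *	struct gsi_xfer_elem xfer;
 *
 *	memset(&xfer, 0, sizeof(xfer));
 *	xfer.addr = dma_addr;
 *	xfer.len = len;
 *	xfer.type = GSI_XFER_ELEM_DATA;
 *	xfer.flags = GSI_XFER_FLAG_EOT;
 *	xfer.xfer_user_data = skb;
 *	ret = gsi_queue_xfer(chan_hdl, 1, &xfer, true);
 *
 * Passing num_xfers = 0 with ring_db = true only rings the doorbell for
 * previously queued TREs.
 */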
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	struct gsi_chan_ctx *ctx;
	uint16_t free;
	uint64_t wp_rollback;
	int i;
	spinlock_t *slock;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
			chan_hdl, num_xfers, xfer);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (unlikely(gsi_ctx->chan[chan_hdl].state
			== GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	/* allow only ring doorbell */
	if (!num_xfers)
		goto ring_doorbell;

	/*
	 * For GCI channels it is the caller's responsibility to make sure
	 * there is enough room in the TRE ring.
	 */
	if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
		__gsi_query_channel_free_re(ctx, &free);
		if (num_xfers > free) {
			GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
				chan_hdl, num_xfers, free);
			spin_unlock_irqrestore(slock, flags);
			return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
		}
	}

	wp_rollback = ctx->ring.wp_local;
	for (i = 0; i < num_xfers; i++) {
		if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
			if (__gsi_populate_gci_tre(ctx, &xfer[i]))
				break;
		} else {
			if (__gsi_populate_tre(ctx, &xfer[i]))
				break;
		}
		gsi_incr_ring_wp(&ctx->ring);
	}

	if (i != num_xfers) {
		/* reject all the xfers */
		ctx->ring.wp_local = wp_rollback;
		spin_unlock_irqrestore(slock, flags);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx->stats.queued += num_xfers;

ring_doorbell:
	if (ring_db) {
		/* ensure TRE is set before ringing doorbell */
		wmb();
		gsi_ring_chan_doorbell(ctx);
	}

	spin_unlock_irqrestore(slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_queue_xfer);
int gsi_start_xfer(unsigned long chan_hdl)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->ring.wp == ctx->ring.wp_local)
		return GSI_STATUS_SUCCESS;

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_xfer);
int gsi_poll_channel(unsigned long chan_hdl,
	struct gsi_chan_xfer_notify *notify)
{
	int unused_var;

	return gsi_poll_n_channel(chan_hdl, notify, 1, &unused_var);
}
EXPORT_SYMBOL(gsi_poll_channel);
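/*
 * A minimal polling-loop sketch for gsi_poll_n_channel() below
 * (NUM_NOTIFY and process() are hypothetical caller-side names):
 *
 *	struct gsi_chan_xfer_notify notify[NUM_NOTIFY];
 *	int actual;
 *
 *	ret = gsi_poll_n_channel(chan_hdl, notify, NUM_NOTIFY, &actual);
 *	if (ret == GSI_STATUS_SUCCESS)
 *		process(notify, actual);
 *	else if (ret == GSI_STATUS_POLL_EMPTY)
 *		gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_CALLBACK);
 */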
int gsi_poll_n_channel(unsigned long chan_hdl,
	struct gsi_chan_xfer_notify *notify,
	int expected_num, int *actual_num)
{
	struct gsi_chan_ctx *ctx;
	uint64_t rp;
	int ee;
	int i;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !notify ||
	    !actual_num || expected_num <= 0) {
		GSIERR("bad params chan_hdl=%lu notify=%pK\n",
			chan_hdl, notify);
		GSIERR("actual_num=%pK expected_num=%d\n",
			actual_num, expected_num);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* Before polling, make sure the channel was actually allocated */
	if (unlikely(ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr) {
		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
		/* update rp to see if we have anything new to process */
		rp = ctx->evtr->props.gsi_read_event_ring_rp(
			&ctx->evtr->props, ctx->evtr->id, ee);
		rp |= ctx->ring.rp & GSI_MSB_MASK;
		ctx->evtr->ring.rp = rp;

		/* read the event ring rp again if the last read was empty */
		if (rp == ctx->evtr->ring.rp_local) {
			/* event ring is empty */
			if (gsi_ctx->per.ver >= GSI_VER_3_0) {
				gsihal_write_reg_nk(
					GSI_EE_n_CNTXT_SRC_IEOB_IRQ_k,
					ee, gsihal_get_ch_reg_idx(ctx->evtr->id),
					gsihal_get_ch_reg_mask(ctx->evtr->id));
			} else {
				gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ,
					ee, 1 << ctx->evtr->id);
			}
			/* do another read to close a small window */
			__iowmb();
			rp = ctx->evtr->props.gsi_read_event_ring_rp(
				&ctx->evtr->props, ctx->evtr->id, ee);
			rp |= ctx->ring.rp & GSI_MSB_MASK;
			ctx->evtr->ring.rp = rp;
			if (rp == ctx->evtr->ring.rp_local) {
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock, flags);
				ctx->stats.poll_empty++;
				return GSI_STATUS_POLL_EMPTY;
			}
		}
	}

	*actual_num = gsi_get_complete_num(&ctx->evtr->ring,
		ctx->evtr->ring.rp_local, ctx->evtr->ring.rp);
	if (*actual_num > expected_num)
		*actual_num = expected_num;

	for (i = 0; i < *actual_num; i++)
		gsi_process_evt_re(ctx->evtr, notify + i, false);

	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
	ctx->stats.poll_ok++;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_poll_n_channel);
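/*
 * gsi_config_channel_mode() below implements the NAPI-style switch
 * between interrupt (CALLBACK) and POLL operation by masking or
 * unmasking the event ring's IEOB interrupt. Note the
 * -GSI_STATUS_PENDING_IRQ return: on GSI 2.2/2.5, an interrupt that
 * raced the unmask is handed back to the caller, which is expected to
 * stay in (or return to) poll mode and drain the event ring before
 * retrying the switch.
 */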
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
{
	struct gsi_chan_ctx *ctx, *coal_ctx;
	enum gsi_chan_mode curr;
	unsigned long flags;
	enum gsi_chan_mode chan_mode;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
	    ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
		GSIERR("cannot configure mode on chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (atomic_read(&ctx->poll_mode))
		curr = GSI_CHAN_MODE_POLL;
	else
		curr = GSI_CHAN_MODE_CALLBACK;

	if (mode == curr) {
		GSIDBG("already in requested mode %u chan_hdl=%lu\n",
			curr, chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	if (curr == GSI_CHAN_MODE_CALLBACK &&
	    mode == GSI_CHAN_MODE_POLL) {
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(ctx->evtr->id),
				gsihal_get_ch_reg_mask(ctx->evtr->id), 0);
		} else {
			__gsi_config_ieob_irq(gsi_ctx->per.ee,
				1 << ctx->evtr->id, 0);
		}
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
				gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(ctx->evtr->id),
				gsihal_get_ch_reg_mask(ctx->evtr->id));
		} else {
			gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
				gsi_ctx->per.ee, 1 << ctx->evtr->id);
		}
		atomic_set(&ctx->poll_mode, mode);
		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
			atomic_set(&ctx->evtr->chan->poll_mode, mode);
		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
			if (coal_ctx != NULL)
				atomic_set(&coal_ctx->poll_mode, mode);
		}
		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
			ctx->evtr->id, mode);
		ctx->stats.callback_to_poll++;
	}

	if (curr == GSI_CHAN_MODE_POLL &&
	    mode == GSI_CHAN_MODE_CALLBACK) {
		atomic_set(&ctx->poll_mode, mode);
		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
			atomic_set(&ctx->evtr->chan->poll_mode, mode);
		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
			if (coal_ctx != NULL)
				atomic_set(&coal_ctx->poll_mode, mode);
		}
		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
				gsihal_get_ch_reg_idx(ctx->evtr->id),
				gsihal_get_ch_reg_mask(ctx->evtr->id), ~0);
		} else {
			__gsi_config_ieob_irq(gsi_ctx->per.ee,
				1 << ctx->evtr->id, ~0);
		}
		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
			ctx->evtr->id, mode);

		/*
		 * In GSI 2.2 and 2.5 there is a limitation that can lead
		 * to losing an interrupt. For these versions an explicit
		 * check is needed after enabling the interrupt.
		 */
		if ((gsi_ctx->per.ver == GSI_VER_2_2 ||
		     gsi_ctx->per.ver == GSI_VER_2_5) &&
		    !gsi_ctx->per.skip_ieob_mask_wa) {
			u32 src = gsihal_read_reg_n(
				GSI_EE_n_CNTXT_SRC_IEOB_IRQ,
				gsi_ctx->per.ee);
			if (src & (1 << ctx->evtr->id)) {
				if (gsi_ctx->per.ver >= GSI_VER_3_0) {
					__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
						gsihal_get_ch_reg_idx(ctx->evtr->id),
						gsihal_get_ch_reg_mask(ctx->evtr->id),
						0);
					gsihal_write_reg_nk(
						GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k,
						gsi_ctx->per.ee,
						gsihal_get_ch_reg_idx(ctx->evtr->id),
						gsihal_get_ch_reg_mask(ctx->evtr->id));
				} else {
					__gsi_config_ieob_irq(gsi_ctx->per.ee,
						1 << ctx->evtr->id, 0);
					gsihal_write_reg_n(
						GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
						gsi_ctx->per.ee,
						1 << ctx->evtr->id);
				}
				spin_unlock_irqrestore(&gsi_ctx->slock, flags);
				spin_lock_irqsave(&ctx->evtr->ring.slock,
					flags);
				chan_mode = atomic_xchg(&ctx->poll_mode,
					GSI_CHAN_MODE_POLL);
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock, flags);
				ctx->stats.poll_pending_irq++;
				GSIDBG("IEOB WA pnd cnt = %ld prvmode = %d\n",
					ctx->stats.poll_pending_irq,
					chan_mode);
				if (chan_mode == GSI_CHAN_MODE_POLL)
					return GSI_STATUS_SUCCESS;
				else
					return -GSI_STATUS_PENDING_IRQ;
			}
		}
		ctx->stats.poll_to_callback++;
	}
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_config_channel_mode);
int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_channel_cfg);
int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_channel_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.ch_id != props->ch_id ||
	    ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_set_channel_cfg);
static void gsi_configure_ieps(enum gsi_ver ver)
{
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_CMD, 1);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_DB, 2);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_DIS_COMP, 3);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_CH_EMPTY, 4);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EE_GENERIC_CMD, 5);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EVENT_GEN_COMP, 6);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_INT_MOD_STOPPED, 7);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0, 8);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2, 9);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1, 10);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_NEW_RE, 11);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_READ_ENG_COMP, 12);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_TIMER_EXPIRED, 13);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_EV_DB, 14);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_UC_GP_INT, 15);
	gsihal_write_reg(GSI_GSI_IRAM_PTR_WRITE_ENG_COMP, 16);

	if (ver >= GSI_VER_2_5)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_TLV_CH_NOT_FULL, 17);
	if (ver >= GSI_VER_2_11)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_MSI_DB, 18);
	if (ver >= GSI_VER_3_0)
		gsihal_write_reg(GSI_GSI_IRAM_PTR_INT_NOTIFY_MCS, 19);
}
static void gsi_configure_bck_prs_matrix(void)
{
	/*
	 * For now, these are default values. In the future, the GSI FW
	 * image will provide optimized back-pressure values.
	 */
	gsihal_write_reg(GSI_IC_DISABLE_CHNL_BCK_PRS_LSB, 0xfffffffe);
	gsihal_write_reg(GSI_IC_DISABLE_CHNL_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_GEN_EVNT_BCK_PRS_LSB, 0xffffffbf);
	gsihal_write_reg(GSI_IC_GEN_EVNT_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_GEN_INT_BCK_PRS_LSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_GEN_INT_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_STOP_INT_MOD_BCK_PRS_LSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_STOP_INT_MOD_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_PROCESS_DESC_BCK_PRS_LSB, 0x00000000);
	gsihal_write_reg(GSI_IC_PROCESS_DESC_BCK_PRS_MSB, 0x00000000);
	gsihal_write_reg(GSI_IC_TLV_STOP_BCK_PRS_LSB, 0xf9ffffff);
	gsihal_write_reg(GSI_IC_TLV_STOP_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_TLV_RESET_BCK_PRS_LSB, 0xf9ffffff);
	gsihal_write_reg(GSI_IC_TLV_RESET_BCK_PRS_MSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_RGSTR_TIMER_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_RGSTR_TIMER_BCK_PRS_MSB, 0xfffffffe);
	gsihal_write_reg(GSI_IC_READ_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_READ_BCK_PRS_MSB, 0xffffefff);
	gsihal_write_reg(GSI_IC_WRITE_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_WRITE_BCK_PRS_MSB, 0xffffdfff);
	gsihal_write_reg(GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB, 0xffffffff);
	gsihal_write_reg(GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB, 0xff03ffff);
}
int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	gsihal_write_reg(GSI_GSI_PERIPH_BASE_ADDR_MSB, 0);
	gsihal_write_reg(GSI_GSI_PERIPH_BASE_ADDR_LSB, per_base_addr);
	gsi_configure_bck_prs_matrix();
	gsi_configure_ieps(ver);

	return 0;
}
EXPORT_SYMBOL(gsi_configure_regs);
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	struct gsihal_reg_gsi_cfg gsi_cfg;

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	/* Enable the MCS and set it to x2 clocks */
	gsi_cfg.gsi_enable = 1;
	gsi_cfg.double_mcs_clk_freq = 1;
	gsi_cfg.uc_is_mcs = 0;
	gsi_cfg.gsi_pwr_clps = 0;
	gsi_cfg.bp_mtrix_disable = 0;
	if (ver >= GSI_VER_1_2) {
		gsihal_write_reg(GSI_GSI_MCS_CFG, 1);
		gsi_cfg.mcs_enable = 0;
	} else {
		gsi_cfg.mcs_enable = 1;
	}

	/* GSI frequency is the peripheral frequency divided by 3 (2 + 1):
	 * a sleep_clk_div of 2 selects the divide-by-three ratio.
	 */
	if (ver >= GSI_VER_2_5)
		gsi_cfg.sleep_clk_div = 2;
	gsihal_write_reg_fields(GSI_GSI_CFG, &gsi_cfg);

	return 0;
}
EXPORT_SYMBOL(gsi_enable_fw);
void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
		unsigned long *size, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (size)
		*size = gsihal_get_inst_ram_size();
	if (base_offset)
		*base_offset = gsihal_get_reg_n_ofst(GSI_GSI_INST_RAM_n, 0);
}
EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
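/*
 * The generic EE commands below (halt / alloc / flow control) share one
 * handshake: clear the return-code field in EE scratch 0, post the
 * command through GSI_EE_n_GSI_EE_GENERIC_CMD with GP_INT1 enabled,
 * wait for the completion, then read the return code back from
 * scratch 0. A return code that is still 0 after the wait therefore
 * means the firmware never responded.
 */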
int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(
		GSI_EE_n_CNTXT_SCRATCH_0, gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD,
		gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
	    GSI_GEN_EE_CMD_RETURN_CODE_RETRY) {
		GSIDBG("chan_idx=%u ee=%u busy try again\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_RETRY;
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_halt_channel_ee);
int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL;
	struct gsi_chan_ctx *ctx;
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (ee == 0)
		return gsi_alloc_ap_channel(chan_idx);

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD,
		gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
	    GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) {
		GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}
	if (ee == 0) {
		ctx = &gsi_ctx->chan[chan_idx];
		gsi_ctx->ch_dbg[chan_idx].ch_allocate++;
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_alloc_channel_ee);
int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
		int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL;
	struct gsihal_reg_ch_k_cntxt_0 ch_k_cntxt_0;
	struct gsihal_reg_gsi_ee_generic_cmd cmd;
	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsihal_write_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee, gsi_ctx->scratch.word0.val);

	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
	cmd.opcode = op;
	cmd.virt_chan_idx = chan_idx;
	cmd.ee = ee;
	gsihal_write_reg_n_fields(GSI_EE_n_GSI_EE_GENERIC_CMD,
		gsi_ctx->per.ee, &cmd);

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
		gsi_ctx->per.ee);
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
	    GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
		GSIDBG("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
			GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE ||
		gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
			GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX) {
		GSIERR("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		GSI_ASSERT();
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	/* read the current channel state */
	gsihal_read_reg_nk_fields(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_idx, &ch_k_cntxt_0);
	curr_state = ch_k_cntxt_0.chstate;
	if (curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
		GSIDBG("ch %u state updated to %u\n", chan_idx, curr_state);
		res = GSI_STATUS_SUCCESS;
	} else {
		GSIERR("ch %u state updated to %u incorrect state\n",
			chan_idx, curr_state);
		res = -GSI_STATUS_ERROR;
	}
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_enable_flow_control_ee);
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	gsihal_write_reg_nk(GSI_MAP_EE_n_CH_k_VP_TABLE,
		ee, chan_num, per_ep_index);

	return 0;
}
EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep);

void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
	uint32_t db_addr_low, uint32_t db_addr_high)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_10,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_low);
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_11,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_high);
	} else {
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_12,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_low);
		gsihal_write_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_13,
			gsi_ctx->per.ee, evt_ring_hdl, db_addr_high);
	}
}
EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
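
/*
 * Illustrative caller-side split of a 64-bit doorbell address into the two
 * 32-bit halves this function expects (a sketch; 'db_addr' is assumed to be
 * provided by the caller):
 *
 *	uint32_t lo = (uint32_t)(db_addr & 0xFFFFFFFF);
 *	uint32_t hi = (uint32_t)(db_addr >> 32);
 *	gsi_wdi3_write_evt_ring_db(evt_ring_hdl, lo, hi);
 */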

int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp)
{
	if (is_rp) {
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
			gsi_ctx->per.ee, chan_hdl);
	} else {
		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
			gsi_ctx->per.ee, chan_hdl);
	}
}
EXPORT_SYMBOL(gsi_get_refetch_reg);
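
/*
 * Illustrative use (a sketch): snapshot both re-fetch pointers of a channel
 * for debugging:
 *
 *	int rp = gsi_get_refetch_reg(chan_hdl, true);
 *	int wp = gsi_get_refetch_reg(chan_hdl, false);
 */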

int gsi_get_drop_stats(unsigned long ep_id, int scratch_id)
{
	/* RTK uses scratch 5 */
	if (scratch_id == 5) {
		/*
		 * Each channel context is 6 lines of 8 bytes, but n in
		 * SHRAM_n is in 4-byte offsets, so multiplying ep_id by
		 * 6*2 = 12 gives the beginning of the required channel
		 * context. Then add 7: the channel context layout starts
		 * with the ring rbase (8 bytes) + channel scratch 0-4
		 * (20 bytes), so an additional 28/4 = 7 words reaches
		 * scratch 5 of the required channel.
		 */
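		/*
		 * Worked example (illustrative): for ep_id = 3, the channel
		 * context starts at word 3 * 12 = 36, and scratch 5 sits at
		 * word 36 + 7 = 43, i.e. GSI_GSI_SHRAM_n with n = 43.
		 */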
		/* return the value read rather than discarding it */
		return gsihal_read_reg_n(GSI_GSI_SHRAM_n, ep_id * 12 + 7);
	}

	return 0;
}
EXPORT_SYMBOL(gsi_get_drop_stats);

void gsi_wdi3_dump_register(unsigned long chan_hdl)
{
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	GSIDBG("reg dump ch id %lu\n", chan_hdl);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_0 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_1 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_2 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_3 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_4 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_5 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_6 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_7 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_QOS,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_QOS 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_0 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_1 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_2 0x%x\n", val);
	val = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_3 0x%x\n", val);
}
EXPORT_SYMBOL(gsi_wdi3_dump_register);

int gsi_query_aqc_msi_addr(unsigned long chan_hdl, phys_addr_t *addr)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*addr = (phys_addr_t)(gsi_ctx->per.phys_addr +
		gsihal_get_reg_nk_ofst(GSI_EE_n_GSI_CH_k_CNTXT_8,
			gsi_ctx->per.ee, chan_hdl));

	return 0;
}
EXPORT_SYMBOL(gsi_query_aqc_msi_addr);
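
/*
 * Illustrative use (a sketch): the returned physical address is that of the
 * channel's CNTXT_8 register, so a peripheral (e.g. AQC) can be programmed
 * to target it with an MSI write:
 *
 *	phys_addr_t msi_addr;
 *
 *	if (!gsi_query_aqc_msi_addr(chan_hdl, &msi_addr))
 *		program_device_msi(msi_addr);	(hypothetical helper)
 */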

static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr)
{
	union __packed gsi_channel_scratch scr;

	/*
	 * The sequence below is not atomic. The assumption is that the
	 * sequencer-specific fields will remain unchanged across it.
	 */
	/* READ */
	scr.data.word1 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word2 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word3 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl);
	scr.data.word4 = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl);

	/* UPDATE */
	scr.mhi.polling_mode = mscr.polling_mode;
	if (gsi_ctx->per.ver < GSI_VER_2_5) {
		scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre;
		scr.mhi.outstanding_threshold = mscr.outstanding_threshold;
	}

	/* WRITE */
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_0,
		gsi_ctx->per.ee, chan_hdl, scr.data.word1);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_1,
		gsi_ctx->per.ee, chan_hdl, scr.data.word2);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_2,
		gsi_ctx->per.ee, chan_hdl, scr.data.word3);
	gsihal_write_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_3,
		gsi_ctx->per.ee, chan_hdl, scr.data.word4);

	return scr;
}
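
/*
 * Illustrative call (a sketch; 'mscr' is assumed prepared by the caller):
 * refresh only the MHI-owned scratch fields, e.g. when toggling polling
 * mode, while preserving whatever the sequencer wrote in the other fields:
 *
 *	mscr.polling_mode = 1;	(assumed encoding: 1 == polling enabled)
 *	__gsi_update_mhi_channel_scratch(chan_hdl, mscr);
 */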

static int msm_gsi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pr_debug("gsi_probe\n");
	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
	if (!gsi_ctx) {
		dev_err(dev, "failed to allocate gsi context\n");
		return -ENOMEM;
	}

	gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
		"gsi", 0);
	if (gsi_ctx->ipc_logbuf == NULL)
		GSIERR("failed to create IPC log, continuing...\n");

	gsi_ctx->dev = dev;
	init_completion(&gsi_ctx->gen_ee_cmd_compl);
	gsi_debugfs_init();

	return 0;
}

static struct platform_driver msm_gsi_driver = {
	.probe = msm_gsi_probe,
	.driver = {
		.name = "gsi",
		.of_match_table = msm_gsi_match,
	},
};

static struct platform_device *pdev;

/*
 * Module init.
 */
static int __init gsi_init(void)
{
	int ret;

	pr_debug("%s\n", __func__);
	ret = platform_driver_register(&msm_gsi_driver);
	if (ret < 0)
		goto out;

	if (running_emulation) {
		pdev = platform_device_register_simple("gsi", -1, NULL, 0);
		if (IS_ERR(pdev)) {
			ret = PTR_ERR(pdev);
			platform_driver_unregister(&msm_gsi_driver);
			goto out;
		}
	}

out:
	return ret;
}
arch_initcall(gsi_init);

/*
 * Module exit.
 */
static void __exit gsi_exit(void)
{
	if (running_emulation && pdev)
		platform_device_unregister(pdev);

	platform_driver_unregister(&msm_gsi_driver);
}
module_exit(gsi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Software Interface (GSI)");