
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
 */
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msm_gsi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_emulation.h"

#define GSI_CMD_TIMEOUT (5*HZ)
#define GSI_START_CMD_TIMEOUT_MS 1000
#define GSI_CMD_POLL_CNT 5
#define GSI_STOP_CMD_TIMEOUT_MS 200
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_IRQ_STORM_THR 5
#define GSI_STOP_CMD_POLL_CNT 4
#define GSI_STOP_IN_PROC_CMD_POLL_CNT 2
#define GSI_RESET_WA_MIN_SLEEP 1000
#define GSI_RESET_WA_MAX_SLEEP 2000
#define GSI_CHNL_STATE_MAX_RETRYCNT 10
#define GSI_STTS_REG_BITS 32

#ifndef CONFIG_DEBUG_FS
void gsi_debugfs_init(void)
{
}
#endif

static const struct of_device_id msm_gsi_match[] = {
	{ .compatible = "qcom,msm_gsi", },
	{ },
};

#if defined(CONFIG_IPA_EMULATION)
static bool running_emulation = true;
#else
static bool running_emulation;
#endif

struct gsi_ctx *gsi_ctx;

static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr);

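/*
 * Read-modify-write helpers for the per-EE interrupt mask/enable
 * registers below: bits set in @mask are replaced by the matching
 * bits of @val; all other bits keep their current value.
 */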
static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	GSIDBG("current IEOB_IRQ_MSK: 0x%x, change to: 0x%x\n",
		curr, ((curr & ~mask) | (val & mask)));
}

static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
}

static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
}

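/*
 * Wait for a channel state change triggered by a CH_START/CH_STOP
 * command: block on the channel completion for up to @tm ms per
 * iteration and, on timeout, read the channel state back from HW so
 * that a missed interrupt does not leave ctx->state stale.
 */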
static void gsi_channel_state_change_wait(unsigned long chan_hdl,
	struct gsi_chan_ctx *ctx,
	uint32_t tm, enum gsi_ch_cmd_opcode op)
{
	int poll_cnt;
	int gsi_pending_intr;
	int res;
	uint32_t type;
	uint32_t val;
	int ee = gsi_ctx->per.ee;
	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
	int stop_in_proc_retry = 0;
	int stop_retry = 0;

	/*
	 * Poll the GSI channel for a duration of up to
	 * tm * GSI_CMD_POLL_CNT. Polling the GSI state improves
	 * debuggability of the GSI HW state.
	 */
	for (poll_cnt = 0;
		poll_cnt < GSI_CMD_POLL_CNT;
		poll_cnt++) {
		res = wait_for_completion_timeout(&ctx->compl,
			msecs_to_jiffies(tm));

		/* Interrupt received, return */
		if (res != 0)
			return;

		type = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(gsi_ctx->per.ee));

		gsi_pending_intr = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));

		/* Update the channel state only if the interrupt was
		 * raised on this particular channel and the interrupt
		 * type indicates channel control.
		 */
		if ((type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK) &&
			((gsi_pending_intr >> chan_hdl) & 1)) {
			/*
			 * Check channel state here in case the channel is
			 * already started but interrupt is not yet received.
			 */
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
					gsi_ctx->per.ee));
			curr_state = (val &
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
		}

		if (op == GSI_CH_START) {
			if (curr_state == GSI_CHAN_STATE_STARTED ||
				curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
				ctx->state = curr_state;
				return;
			}
		}

		if (op == GSI_CH_STOP) {
			if (curr_state == GSI_CHAN_STATE_STOPPED)
				stop_retry++;
			else if (curr_state == GSI_CHAN_STATE_STOP_IN_PROC)
				stop_in_proc_retry++;
		}

		/* If a stop result was observed, restart the poll count
		 * so the loop keeps running until the stop / stop-in-proc
		 * retry limits are reached.
		 */
		if (stop_retry == 1 || stop_in_proc_retry == 1)
			poll_cnt = 0;

		/* If the stop retry limit is reached, clear the pending
		 * interrupt since the channel is already stopped.
		 */
		if (stop_retry == GSI_STOP_CMD_POLL_CNT) {
			gsi_writel(gsi_pending_intr, gsi_ctx->base +
				GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
			ctx->state = curr_state;
			return;
		}

		/* If the channel is stuck in STOP_IN_PROC, there is no
		 * need to keep waiting.
		 */
		if (stop_in_proc_retry == GSI_STOP_IN_PROC_CMD_POLL_CNT) {
			ctx->state = curr_state;
			return;
		}

		GSIDBG("GSI wait on chan_hdl=%lu irqtyp=%u state=%u intr=%u\n",
			chan_hdl,
			type,
			ctx->state,
			gsi_pending_intr);
	}

	GSIDBG("invalidating the channel state when timeout happens\n");
	ctx->state = curr_state;
}

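/*
 * Channel-control and event-control IRQ handlers: for each bit set in
 * the source IRQ register, refresh the cached state from the HW
 * context register and complete the command waiter for that
 * channel/event ring.
 */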
static void gsi_handle_ch_ctrl(int ee)
{
	uint32_t ch;
	int i;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
	GSIDBG("ch %x\n", ch);
	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch) {
			if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
				GSIERR("invalid channel %d\n", i);
				break;
			}

			ctx = &gsi_ctx->chan[i];
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
			ctx->state = (val &
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("ch %u state updated to %u\n", i, ctx->state);
			complete(&ctx->compl);
			gsi_ctx->ch_dbg[i].cmd_completed++;
		}
	}
}

static void gsi_handle_ev_ctrl(int ee)
{
	uint32_t ch;
	int i;
	uint32_t val;
	struct gsi_evt_ctx *ctx;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee));
	GSIDBG("ev %x\n", ch);
	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch) {
			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
				GSIERR("invalid event %d\n", i);
				break;
			}

			ctx = &gsi_ctx->evtr[i];
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
			ctx->state = (val &
				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("evt %u state updated to %u\n", i, ctx->state);
			complete(&ctx->compl);
		}
	}
}

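/*
 * Decode the ERROR_LOG register contents (interpreted as a struct
 * gsi_log_err) and route the failure to the peripheral, channel, or
 * event-ring error callback that matches log->err_type.
 */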
static void gsi_handle_glob_err(uint32_t err)
{
	struct gsi_log_err *log;
	struct gsi_chan_ctx *ch;
	struct gsi_evt_ctx *ev;
	struct gsi_chan_err_notify chan_notify;
	struct gsi_evt_err_notify evt_notify;
	struct gsi_per_notify per_notify;
	uint32_t val;
	enum gsi_err_type err_type;

	log = (struct gsi_log_err *)&err;
	GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
		log->virt_idx);
	GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
		log->arg2, log->arg3);

	err_type = log->err_type;
	/*
	 * These are errors thrown by hardware. We need
	 * BUG_ON() to capture the hardware state right
	 * when it is unexpected.
	 */
	switch (err_type) {
	case GSI_ERR_TYPE_GLOB:
		per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
		per_notify.user_data = gsi_ctx->per.user_data;
		per_notify.data.err_desc = err & 0xFFFF;
		gsi_ctx->per.notify_cb(&per_notify);
		break;
	case GSI_ERR_TYPE_CHAN:
		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) {
			GSIERR("Unexpected ch %d\n", log->virt_idx);
			return;
		}

		ch = &gsi_ctx->chan[log->virt_idx];
		chan_notify.chan_user_data = ch->props.chan_user_data;
		chan_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_INVALID_TRE_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}

			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx,
					gsi_ctx->per.ee));
			ch->state = (val &
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("ch %u state updated to %u\n", log->virt_idx,
				ch->state);

			ch->stats.invalid_tre_error++;
			if (ch->state == GSI_CHAN_STATE_ERROR) {
				GSIERR("Unexpected channel state %d\n",
					ch->state);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
		} else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
			complete(&ch->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			chan_notify.evt_id =
				GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id =
				GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
		} else if (log->code == GSI_HWO_1_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
		} else {
			GSIERR("unexpected event log code %d\n", log->code);
			GSI_ASSERT();
		}
		ch->props.err_cb(&chan_notify);
		break;
	case GSI_ERR_TYPE_EVT:
		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) {
			GSIERR("Unexpected ev %d\n", log->virt_idx);
			return;
		}

		ev = &gsi_ctx->evtr[log->virt_idx];
		evt_notify.user_data = ev->props.user_data;
		evt_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
			complete(&ev->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
			if (log->ee != gsi_ctx->per.ee) {
				GSIERR("unexpected EE in event %d\n", log->ee);
				GSI_ASSERT();
			}
			evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
		} else {
			GSIERR("unexpected event log code %d\n", log->code);
			GSI_ASSERT();
		}
		ev->props.err_cb(&evt_notify);
		break;
	}
}

static void gsi_handle_gp_int1(void)
{
	complete(&gsi_ctx->gen_ee_cmd_compl);
}

static void gsi_handle_glob_ee(int ee)
{
	uint32_t val;
	uint32_t err;
	struct gsi_per_notify notify;
	uint32_t clr = ~0;

	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee));
	notify.user_data = gsi_ctx->per.user_data;

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
		err = gsi_readl(gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_OFFS(ee));
		if (gsi_ctx->per.ver >= GSI_VER_1_2)
			gsi_writel(0, gsi_ctx->base +
				GSI_EE_n_ERROR_LOG_OFFS(ee));
		gsi_writel(clr, gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
		gsi_handle_glob_err(err);
	}

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK)
		gsi_handle_gp_int1();

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP2;
		gsi_ctx->per.notify_cb(&notify);
	}

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP3;
		gsi_ctx->per.notify_cb(&notify);
	}

	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee));
}

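/*
 * Ring helpers: wp_local/rp_local advance one element at a time and
 * wrap from end back to base, so the ring is traversed as a circular
 * buffer of elem_sz-sized slots.
 */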
static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
{
	ctx->wp_local += ctx->elem_sz;
	if (ctx->wp_local == ctx->end)
		ctx->wp_local = ctx->base;
}

static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
{
	ctx->rp_local += ctx->elem_sz;
	if (ctx->rp_local == ctx->end)
		ctx->rp_local = ctx->base;
}

uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
{
	WARN_ON(addr < ctx->base || addr >= ctx->end);
	return (uint32_t)(addr - ctx->base) / ctx->elem_sz;
}

static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
	uint64_t addr2)
{
	uint32_t addr_diff;

	GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
		ctx->base, ctx->end);

	if (addr1 < ctx->base || addr1 >= ctx->end) {
		GSIERR("address = 0x%llx not in range\n", addr1);
		GSI_ASSERT();
	}

	if (addr2 < ctx->base || addr2 >= ctx->end) {
		GSIERR("address = 0x%llx not in range\n", addr2);
		GSI_ASSERT();
	}

	addr_diff = (uint32_t)(addr2 - addr1);
	if (addr1 < addr2)
		return addr_diff / ctx->elem_sz;
	else
		return (addr_diff + ctx->len) / ctx->elem_sz;
}

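/*
 * Translate one transfer-completion event into a client notification:
 * advance the channel read pointer (non-GCI), or take the
 * cookie-indexed slot (GCI), recover the per-transfer user data, and
 * optionally invoke the client's xfer_cb.
 */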
static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
		struct gsi_chan_xfer_notify *notify, bool callback)
{
	uint32_t ch_id;
	struct gsi_chan_ctx *ch_ctx;
	uint16_t rp_idx;
	uint64_t rp;

	ch_id = evt->chid;
	if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
		GSIERR("Unexpected ch %d\n", ch_id);
		return;
	}

	ch_ctx = &gsi_ctx->chan[ch_id];
	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
		return;

	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
		rp = evt->xfer_ptr;

		if (ch_ctx->ring.rp_local != rp) {
			ch_ctx->stats.completed +=
				gsi_get_complete_num(&ch_ctx->ring,
				ch_ctx->ring.rp_local, rp);
			ch_ctx->ring.rp_local = rp;
		}

		/*
		 * Increment RP local only in polling context to avoid
		 * sys len mismatch.
		 */
		if (!(callback && ch_ctx->props.dir ==
					GSI_CHAN_DIR_FROM_GSI))
			/* the element at RP is also processed */
			gsi_incr_ring_rp(&ch_ctx->ring);

		ch_ctx->ring.rp = ch_ctx->ring.rp_local;
		rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
		notify->veid = GSI_VEID_DEFAULT;
	} else {
		rp_idx = evt->cookie;
		notify->veid = evt->veid;
	}

	WARN_ON(!ch_ctx->user_data[rp_idx].valid);
	notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;
	/*
	 * During suspend, an IEOB interrupt may arrive just before the
	 * channel is stopped; its transfer pointer is not processed
	 * because the channel is moving to poll mode. After the channel
	 * is restarted on resume, the next IEOB interrupt would then
	 * overwrite that transfer pointer. To avoid this, process all
	 * such data in the polling context.
	 */
	if (!(callback && ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)) {
		ch_ctx->stats.completed++;
		ch_ctx->user_data[rp_idx].valid = false;
	}

	notify->chan_user_data = ch_ctx->props.chan_user_data;
	notify->evt_id = evt->code;
	notify->bytes_xfered = evt->len;

	if (callback) {
		if (atomic_read(&ch_ctx->poll_mode)) {
			GSIERR("Calling client callback in polling mode\n");
			WARN_ON(1);
		}
		ch_ctx->props.xfer_cb(notify);
	}
}

static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
		struct gsi_chan_xfer_notify *notify, bool callback)
{
	struct gsi_xfer_compl_evt *evt;
	struct gsi_chan_ctx *ch_ctx;

	evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
			ctx->ring.rp_local - ctx->ring.base);
	gsi_process_chan(evt, notify, callback);
	/*
	 * Increment RP local only in polling context to avoid
	 * sys len mismatch.
	 */
	ch_ctx = &gsi_ctx->chan[evt->chid];
	if (callback && ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		return;
	gsi_incr_ring_rp(&ctx->ring);
	/* recycle this element */
	gsi_incr_ring_wp(&ctx->ring);
	ctx->stats.completed++;
}

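/*
 * Doorbell helpers: publish wp_local to HW by writing the low 32 bits
 * of the write pointer into the event/channel DOORBELL_0 register.
 */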
static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
	uint32_t val;

	ctx->ring.wp = ctx->ring.wp_local;
	val = (ctx->ring.wp_local &
		GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id,
			gsi_ctx->per.ee));
}

static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
{
	uint32_t val;

	/*
	 * Allocate new events for this channel first
	 * before submitting the new TREs.
	 * For TO_GSI channels the event ring doorbell is rung as part
	 * of the interrupt handling.
	 */
	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		gsi_ring_evt_doorbell(ctx->evtr);
	ctx->ring.wp = ctx->ring.wp_local;

	val = (ctx->ring.wp_local &
		GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id,
			gsi_ctx->per.ee));
}

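/*
 * IEOB IRQ handler: for each unmasked event ring with a pending bit,
 * drain completion events from rp_local up to the HW read pointer,
 * then ring the event doorbell to recycle the processed elements.
 */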
static void gsi_handle_ieob(int ee)
{
	uint32_t ch;
	int i;
	uint64_t rp;
	struct gsi_evt_ctx *ctx;
	struct gsi_chan_xfer_notify notify;
	unsigned long flags;
	unsigned long cntr;
	uint32_t msk;
	bool empty;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee));
	msk = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	gsi_writel(ch & msk, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));

	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch & msk) {
			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
				GSIERR("invalid event %d\n", i);
				break;
			}
			ctx = &gsi_ctx->evtr[i];

			/*
			 * Don't handle MSI interrupts, only handle IEOB
			 * IRQs
			 */
			if (ctx->props.intr == GSI_INTR_MSI)
				continue;

			if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
				GSIERR("Unexpected irq intf %d\n",
					ctx->props.intf);
				GSI_ASSERT();
			}
			spin_lock_irqsave(&ctx->ring.slock, flags);
check_again:
			cntr = 0;
			empty = true;
			rp = ctx->props.gsi_read_event_ring_rp(&ctx->props,
				ctx->id, ee);
			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;

			ctx->ring.rp = rp;
			while (ctx->ring.rp_local != rp) {
				++cntr;
				if (ctx->props.exclusive &&
					atomic_read(&ctx->chan->poll_mode)) {
					cntr = 0;
					break;
				}
				gsi_process_evt_re(ctx, &notify, true);
				empty = false;
			}
			if (!empty)
				gsi_ring_evt_doorbell(ctx);
			if (cntr != 0)
				goto check_again;
			spin_unlock_irqrestore(&ctx->ring.slock, flags);
		}
	}
}

static void gsi_handle_inter_ee_ch_ctrl(int ee)
{
	uint32_t ch;
	int i;

	ch = gsi_readl(gsi_ctx->base +
		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch) {
			/* not currently expected */
			GSIERR("ch %u was inter-EE changed\n", i);
		}
	}
}

static void gsi_handle_inter_ee_ev_ctrl(int ee)
{
	uint32_t ch;
	int i;

	ch = gsi_readl(gsi_ctx->base +
		GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee));
	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch) {
			/* not currently expected */
			GSIERR("evt %u was inter-EE changed\n", i);
		}
	}
}

static void gsi_handle_general(int ee)
{
	uint32_t val;
	struct gsi_per_notify notify;

	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee));
	notify.user_data = gsi_ctx->per.user_data;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;

	if (gsi_ctx->per.notify_cb)
		gsi_ctx->per.notify_cb(&notify);

	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee));
}

#define GSI_ISR_MAX_ITER 50

static void gsi_handle_irq(void)
{
	uint32_t type;
	int ee = gsi_ctx->per.ee;
	unsigned long cnt = 0;

	while (1) {
		if (!gsi_ctx->per.clk_status_cb())
			break;
		type = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee));

		if (!type)
			break;

		GSIDBG_LOW("type 0x%x\n", type);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
			gsi_handle_ch_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK)
			gsi_handle_ev_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK)
			gsi_handle_glob_ee(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
			gsi_handle_ieob(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK)
			gsi_handle_inter_ee_ch_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK)
			gsi_handle_inter_ee_ev_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
			gsi_handle_general(ee);

		if (++cnt > GSI_ISR_MAX_ITER) {
			/*
			 * Exceeded the max number of spurious interrupts
			 * allowed from hardware; unexpected hardware state.
			 */
			GSIERR("Too many spurious interrupts from GSI HW\n");
			GSI_ASSERT();
		}
	}
}

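/*
 * Top-level ISR. If a clock-request callback is registered, the IRQ
 * is only serviced once clocks are granted; otherwise an interrupt
 * that fires while clocks are off is counted, and a storm of
 * GSI_IRQ_STORM_THR such interrupts triggers enable_clk_bug_on().
 */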
static irqreturn_t gsi_isr(int irq, void *ctxt)
{
	if (gsi_ctx->per.req_clk_cb) {
		bool granted = false;

		gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
		if (granted) {
			gsi_handle_irq();
			gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
		}
	} else if (!gsi_ctx->per.clk_status_cb()) {
		/* we only want to capture the gsi isr storm here */
		if (atomic_read(&gsi_ctx->num_unclock_irq) ==
			GSI_IRQ_STORM_THR)
			gsi_ctx->per.enable_clk_bug_on();
		atomic_inc(&gsi_ctx->num_unclock_irq);
		return IRQ_HANDLED;
	} else {
		atomic_set(&gsi_ctx->num_unclock_irq, 0);
		gsi_handle_irq();
	}

	return IRQ_HANDLED;
}

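/*
 * The number of channels/event rings per EE is read from the HW_PARAM
 * registers, whose layout differs per GSI version; a return value of
 * 0 is treated by callers as a probe failure.
 */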
static uint32_t gsi_get_max_channels(enum gsi_ver ver)
{
	uint32_t reg = 0;

	switch (ver) {
	case GSI_VER_ERR:
	case GSI_VER_MAX:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		break;
	case GSI_VER_1_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
		break;
	case GSI_VER_1_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
		break;
	case GSI_VER_1_3:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_5:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_7:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_9:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	case GSI_VER_2_11:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_11_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_11_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V2_11_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	default:
		GSIERR("GSI version is not supported %d\n", ver);
		break;
	}

	GSIDBG("max channels %d\n", reg);

	return reg;
}

static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
{
	uint32_t reg = 0;

	switch (ver) {
	case GSI_VER_ERR:
	case GSI_VER_MAX:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		break;
	case GSI_VER_1_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
		break;
	case GSI_VER_1_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
		break;
	case GSI_VER_1_3:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_5:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_7:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_9:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	case GSI_VER_2_11:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V2_11_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V2_11_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V2_11_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	default:
		GSIERR("GSI version is not supported %d\n", ver);
		break;
	}

	GSIDBG("max event rings %d\n", reg);

	return reg;
}

int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	gsi_handle_irq();
	gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_complete_clk_grant);

int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	gsi_ctx->base = devm_ioremap(
		gsi_ctx->dev, gsi_base_addr, gsi_size);

	if (!gsi_ctx->base) {
		GSIERR("failed to map access to GSI HW\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%x)\n",
		&gsi_base_addr,
		gsi_ctx->base,
		gsi_size);

	return 0;
}
EXPORT_SYMBOL(gsi_map_base);

int gsi_unmap_base(void)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
	gsi_ctx->base = NULL;

	return 0;
}
EXPORT_SYMBOL(gsi_unmap_base);

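/*
 * Register the peripheral that owns this GSI instance: validate the
 * properties, hook up the (real or emulated) interrupt, map the GSI
 * register space if needed, discover the HW channel/event-ring
 * limits, and configure the default interrupt masks.
 */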
int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
{
	int res;
	uint32_t val;
	int needed_reg_ver;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !dev_hdl) {
		GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
		GSIERR("bad params gsi_ver=%d\n", props->ver);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->notify_cb) {
		GSIERR("notify callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->req_clk_cb && !props->rel_clk_cb) {
		GSIERR("rel callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->per_registered) {
		GSIERR("per already registered\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	switch (props->ver) {
	case GSI_VER_1_0:
	case GSI_VER_1_2:
	case GSI_VER_1_3:
	case GSI_VER_2_0:
	case GSI_VER_2_2:
		needed_reg_ver = GSI_REGISTER_VER_1;
		break;
	case GSI_VER_2_5:
	case GSI_VER_2_7:
	case GSI_VER_2_9:
	case GSI_VER_2_11:
		needed_reg_ver = GSI_REGISTER_VER_2;
		break;
	case GSI_VER_ERR:
	case GSI_VER_MAX:
	default:
		GSIERR("GSI version is not supported %d\n", props->ver);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (needed_reg_ver != GSI_REGISTER_VER_CURRENT) {
		GSIERR("Invalid register version. current=%d, needed=%d\n",
			GSI_REGISTER_VER_CURRENT, needed_reg_ver);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
	GSIDBG("gsi ver %d register ver %d needed register ver %d\n",
		props->ver, GSI_REGISTER_VER_CURRENT, needed_reg_ver);

	spin_lock_init(&gsi_ctx->slock);
	if (props->intr == GSI_INTR_IRQ) {
		if (!props->irq) {
			GSIERR("bad irq specified %u\n", props->irq);
			return -GSI_STATUS_INVALID_PARAMS;
		}
		/*
		 * On a real UE, there are two separate interrupt
		 * vectors that get directed toward the GSI/IPA
		 * drivers. They are handled by gsi_isr() and
		 * (ipa_isr() or ipa3_isr()) respectively. In the
		 * emulation environment, this is not the case;
		 * instead, interrupt vectors are routed to the
		 * emulation hardware's interrupt controller, which
		 * in turn forwards a single interrupt to the GSI/IPA
		 * driver. When the new interrupt vector is received,
		 * the driver needs to probe the interrupt
		 * controller's registers to see if one, the other, or
		 * both interrupts have occurred. Given the above, we
		 * now need to handle both situations, namely: the
		 * emulator's and the real UE's.
		 */
  1017. if (running_emulation) {
  1018. /*
  1019. * New scheme involving the emulator's
  1020. * interrupt controller.
  1021. */
  1022. res = devm_request_threaded_irq(
  1023. gsi_ctx->dev,
  1024. props->irq,
  1025. /* top half handler to follow */
  1026. emulator_hard_irq_isr,
  1027. /* threaded bottom half handler to follow */
  1028. emulator_soft_irq_isr,
  1029. IRQF_SHARED,
  1030. "emulator_intcntrlr",
  1031. gsi_ctx);
  1032. } else {
  1033. /*
  1034. * Traditional scheme used on the real UE.
  1035. */
  1036. res = devm_request_irq(gsi_ctx->dev, props->irq,
  1037. gsi_isr,
  1038. props->req_clk_cb ? IRQF_TRIGGER_RISING :
  1039. IRQF_TRIGGER_HIGH,
  1040. "gsi",
  1041. gsi_ctx);
  1042. }
  1043. if (res) {
  1044. GSIERR(
  1045. "failed to register isr for %u\n",
  1046. props->irq);
  1047. return -GSI_STATUS_ERROR;
  1048. }
  1049. GSIDBG(
  1050. "succeeded to register isr for %u\n",
  1051. props->irq);
  1052. res = enable_irq_wake(props->irq);
  1053. if (res)
  1054. GSIERR("failed to enable wake irq %u\n", props->irq);
  1055. else
  1056. GSIERR("GSI irq is wake enabled %u\n", props->irq);
  1057. } else {
  1058. GSIERR("do not support interrupt type %u\n", props->intr);
  1059. return -GSI_STATUS_UNSUPPORTED_OP;
  1060. }
  1061. /*
  1062. * If base not previously mapped via gsi_map_base(), map it
  1063. * now...
  1064. */
  1065. if (!gsi_ctx->base) {
  1066. res = gsi_map_base(props->phys_addr, props->size);
  1067. if (res)
  1068. return res;
  1069. }
  1070. if (running_emulation) {
  1071. GSIDBG("GSI SW ver register value 0x%x\n",
  1072. gsi_readl(gsi_ctx->base +
  1073. GSI_EE_n_GSI_SW_VERSION_OFFS(0)));
  1074. gsi_ctx->intcntrlr_mem_size =
  1075. props->emulator_intcntrlr_size;
  1076. gsi_ctx->intcntrlr_base =
  1077. devm_ioremap_nocache(
  1078. gsi_ctx->dev,
  1079. props->emulator_intcntrlr_addr,
  1080. props->emulator_intcntrlr_size);
  1081. if (!gsi_ctx->intcntrlr_base) {
  1082. GSIERR(
  1083. "failed to remap emulator's interrupt controller HW\n");
  1084. gsi_unmap_base();
  1085. devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
  1086. return -GSI_STATUS_RES_ALLOC_FAILURE;
  1087. }
  1088. GSIDBG(
  1089. "Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
  1090. &(props->emulator_intcntrlr_addr),
  1091. gsi_ctx->intcntrlr_base,
  1092. props->emulator_intcntrlr_size);
  1093. gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
  1094. gsi_ctx->intcntrlr_client_isr =
  1095. props->emulator_intcntrlr_client_isr;
  1096. }
  1097. gsi_ctx->per = *props;
  1098. gsi_ctx->per_registered = true;
  1099. mutex_init(&gsi_ctx->mlock);
  1100. atomic_set(&gsi_ctx->num_chan, 0);
  1101. atomic_set(&gsi_ctx->num_evt_ring, 0);
  1102. gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
  1103. if (gsi_ctx->max_ch == 0) {
  1104. gsi_unmap_base();
  1105. if (running_emulation)
  1106. devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
  1107. gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
  1108. devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
  1109. GSIERR("failed to get max channels\n");
  1110. return -GSI_STATUS_ERROR;
  1111. }
  1112. gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
  1113. if (gsi_ctx->max_ev == 0) {
  1114. gsi_unmap_base();
  1115. if (running_emulation)
  1116. devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
  1117. gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
  1118. devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
  1119. GSIERR("failed to get max event rings\n");
  1120. return -GSI_STATUS_ERROR;
  1121. }
  1122. if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
  1123. GSIERR("max event rings are beyond absolute maximum\n");
  1124. return -GSI_STATUS_ERROR;
  1125. }
  1126. if (props->mhi_er_id_limits_valid &&
  1127. props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
  1128. gsi_unmap_base();
  1129. if (running_emulation)
  1130. devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
  1131. gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
  1132. devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
  1133. GSIERR("MHI event ring start id %u is beyond max %u\n",
  1134. props->mhi_er_id_limits[0], gsi_ctx->max_ev);
  1135. return -GSI_STATUS_ERROR;
  1136. }
  1137. gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
  1138. /* exclude reserved mhi events */
  1139. if (props->mhi_er_id_limits_valid)
  1140. gsi_ctx->evt_bmap |=
  1141. ((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^
  1142. ((1 << (props->mhi_er_id_limits[0])) - 1);
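	/*
	 * Editor's note: a worked instance of the bitmap math above,
	 * with values assumed for illustration. For max_ev = 16 and
	 * mhi_er_id_limits = {10, 11}:
	 *
	 *	(1 << 16) - 1                      = 0xFFFF  (rings 0..15)
	 *	((1 << 12) - 1) ^ ((1 << 10) - 1)  = 0x0C00  (rings 10..11)
	 *
	 * so the low 16 bits of evt_bmap end up as 0x0C00 and all higher
	 * bits are set: nonexistent rings and the reserved MHI range are
	 * pre-marked "in use", so find_first_zero_bit() in
	 * gsi_alloc_evt_ring() can never hand them out.
	 */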
	/*
	 * Enable all interrupts except GSI_BREAK_POINT.
	 * Inter-EE commands / interrupts are not supported.
	 */
	__gsi_config_type_irq(props->ee, ~0, ~0);
	__gsi_config_ch_irq(props->ee, ~0, ~0);
	__gsi_config_evt_irq(props->ee, ~0, ~0);
	__gsi_config_ieob_irq(props->ee, ~0, ~0);
	__gsi_config_glob_irq(props->ee, ~0, ~0);
	/*
	 * Disable the global INT1 interrupt by default and enable it
	 * only when sending the generic command.
	 */
	__gsi_config_gen_irq(props->ee, ~0,
		~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK);
	__gsi_config_glob_irq(props->ee,
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK, 0);

	gsi_writel(props->intr, gsi_ctx->base +
		GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));

	/* set GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB/MSB to 0 */
	if ((gsi_ctx->per.ver >= GSI_VER_2_0) &&
	    (props->intr != GSI_INTR_MSI)) {
		gsi_writel(0, gsi_ctx->base +
			GSI_EE_n_CNTXT_MSI_BASE_LSB(gsi_ctx->per.ee));
		gsi_writel(0, gsi_ctx->base +
			GSI_EE_n_CNTXT_MSI_BASE_MSB(gsi_ctx->per.ee));
	}

	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
	if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK)
		gsi_ctx->enabled = true;
	else
		GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");

	if (gsi_ctx->per.ver >= GSI_VER_1_2)
		gsi_writel(0, gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));

	if (running_emulation) {
		/*
		 * Set up the emulator's interrupt controller...
		 */
		res = setup_emulator_cntrlr(
			gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
		if (res != 0) {
			gsi_unmap_base();
			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
			GSIERR("setup_emulator_cntrlr() failed\n");
			return res;
		}
	}

	*dev_hdl = (uintptr_t)gsi_ctx;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_register_device);
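/*
 * Editor's note: a minimal sketch of how a client (e.g. the IPA driver)
 * might register with GSI. All field values below are assumptions for
 * illustration, not a definitive configuration; my_* identifiers are
 * hypothetical.
 *
 *	struct gsi_per_props props;
 *	unsigned long dev_hdl;
 *	int rc;
 *
 *	memset(&props, 0, sizeof(props));
 *	props.ver = GSI_VER_2_5;
 *	props.ee = 0;
 *	props.intr = GSI_INTR_IRQ;
 *	props.irq = my_gsi_irq;		// hypothetical IRQ number
 *	props.phys_addr = my_gsi_base;	// hypothetical register base
 *	props.size = my_gsi_size;	// hypothetical register space size
 *	rc = gsi_register_device(&props, &dev_hdl);
 *	if (rc != GSI_STATUS_SUCCESS)
 *		return rc;
 *	// dev_hdl is then passed to all subsequent gsi_* calls
 */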
int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	unsigned int max_usb_pkt_size = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (val->max_usb_pkt_size_valid &&
	    val->max_usb_pkt_size != 1024 &&
	    val->max_usb_pkt_size != 512 &&
	    val->max_usb_pkt_size != 64) {
		GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
			val->max_usb_pkt_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	if (val->mhi_base_chan_idx_valid)
		gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
			val->mhi_base_chan_idx;
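	/*
	 * The scratch field holds an encoded value, not the raw packet
	 * size: 512 -> 0, 1024 -> 1, 64 -> 2 (derived from the logic
	 * below, which defaults to 2 and only re-encodes sizes above 64).
	 */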
	if (val->max_usb_pkt_size_valid) {
		max_usb_pkt_size = 2;
		if (val->max_usb_pkt_size > 64)
			max_usb_pkt_size =
				(val->max_usb_pkt_size == 1024) ? 1 : 0;
		gsi_ctx->scratch.word0.s.max_usb_pkt_size = max_usb_pkt_size;
	}

	gsi_writel(gsi_ctx->scratch.word0.val,
		gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_device_scratch);
int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
			gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!force && atomic_read(&gsi_ctx->num_chan)) {
		GSIERR("cannot deregister: %u channels are still connected\n",
			atomic_read(&gsi_ctx->num_chan));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
		GSIERR("cannot deregister: %u event rings are still connected\n",
			atomic_read(&gsi_ctx->num_evt_ring));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* disable all interrupts */
	__gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);

	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
	gsi_unmap_base();
	memset(gsi_ctx, 0, sizeof(*gsi_ctx));

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_deregister_device);

static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
		uint8_t evt_id, unsigned int ee)
{
	uint32_t val;

	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
		props->re_size);

	val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) |
		((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) |
		((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
		& GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee));

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		val = (props->ring_len &
			GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK)
			<< GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
		gsi_writel(val, gsi_ctx->base +
			GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
	} else {
		val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK)
			<< GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
		gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
	}

	val = (props->ring_base_addr &
		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee));

	val = ((props->ring_base_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee));

	val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) |
		((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee));

	val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee));

	val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee));

	val = ((props->msi_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee));

	val = (props->rp_update_addr &
		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee));

	val = ((props->rp_update_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee));
}

static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;

	if (props->rp_update_vaddr)
		*(uint64_t *)(props->rp_update_vaddr) = ctx->rp_local;
}
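/*
 * Editor's note (assumption, inferred from the arithmetic above):
 * max_num_elem is one less than the number of elements that physically
 * fit in the ring, the usual ring-buffer convention that keeps one slot
 * unused so a completely full ring can be told apart from an empty one
 * (where wp == rp). gsi_prime_evt_ring() below is consistent with this:
 * it advances wp_local to the last usable element, leaving one slot free.
 */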
static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;
	uint32_t val;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		ctx->ring.max_num_elem * ctx->ring.elem_sz;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id,
		gsi_ctx->per.ee));

	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static void gsi_prime_evt_ring_wdi(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	if (ctx->ring.base_va)
		memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		((ctx->ring.max_num_elem + 2) * ctx->ring.elem_sz);
	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
{
	uint64_t ra;

	if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
	     props->ring_len % 4) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_8B &&
	     props->ring_len % 8) ||
	    (props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
	     props->ring_len % 16)) {
		GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
			props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
			props->ring_base_addr,
			roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
	    (!props->evchid_valid ||
	     props->evchid > gsi_ctx->per.mhi_er_id_limits[1] ||
	     props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) {
		GSIERR("MHI requires evchid valid=%d val=%u\n",
			props->evchid_valid, props->evchid);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
	    props->evchid_valid) {
		GSIERR("protocol %u cannot specify evchid\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}
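/*
 * Editor's note: a worked instance of the alignment check above (values
 * assumed for illustration). For ring_len = 0x1800 (6 KB),
 * roundup_pow_of_two() yields 0x2000, so ring_base_addr must be 8 KB
 * aligned: 0x40002000 passes, while 0x40001000 fails because
 * 0x40001000 % 0x2000 == 0x1000.
 */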
/**
 * gsi_cleanup_xfer_user_data: cleanup the user data array using the callback
 * passed by the IPA driver. This must be done in GSI since only GSI knows
 * which TREs are in use. However, IPA is the one that does the cleaning,
 * therefore we pass a callback from IPA and call it using params from GSI.
 *
 * @chan_hdl: hdl of the gsi channel user data array to be cleaned
 * @cleanup_cb: callback used to clean the user data array. takes 2 inputs
 * @chan_user_data: ipa_sys_context of the gsi_channel
 * @xfer_user_data: user data array element (rx_pkt wrapper)
 *
 * Returns: 0 on success, negative on failure
 */
static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
	void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
{
	struct gsi_chan_ctx *ctx;
	uint64_t i;
	uint16_t rp_idx;

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* for coalescing, traverse the whole array */
	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		size_t user_data_size =
			ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
		for (i = 0; i < user_data_size; i++) {
			if (ctx->user_data[i].valid)
				cleanup_cb(ctx->props.chan_user_data,
					ctx->user_data[i].p);
		}
	} else {
		/* for non-coalescing, clean between RP and WP */
		while (ctx->ring.rp_local != ctx->ring.wp_local) {
			rp_idx = gsi_find_idx_from_addr(&ctx->ring,
				ctx->ring.rp_local);
			WARN_ON(!ctx->user_data[rp_idx].valid);
			cleanup_cb(ctx->props.chan_user_data,
				ctx->user_data[rp_idx].p);
			gsi_incr_ring_rp(&ctx->ring);
		}
	}
	return 0;
}

/**
 * gsi_read_event_ring_rp_ddr - returns the RP value of the event ring,
 * read from the RP-update mirror location in DDR.
 *
 * @props: props structure of the event channel
 * @id: event channel index
 * @ee: EE
 *
 * Return: the event ring read pointer
 */
static inline uint64_t gsi_read_event_ring_rp_ddr(struct gsi_evt_ring_props *props,
	uint8_t id, int ee)
{
	return gsi_readl(props->rp_update_vaddr);
}

/**
 * gsi_read_event_ring_rp_reg - returns the RP value of the event ring,
 * read from the ring context register.
 *
 * @props: props structure of the event channel
 * @id: event channel index
 * @ee: EE
 *
 * Return: the event ring read pointer
 */
static inline uint64_t gsi_read_event_ring_rp_reg(struct gsi_evt_ring_props *props,
	uint8_t id, int ee)
{
	return gsi_readl(gsi_ctx->base + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(id, ee));
}
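/*
 * Editor's note: gsi_alloc_evt_ring() below selects between these two
 * helpers. When the client supplies rp_update_addr, the RP is read from
 * the DDR mirror (cheap memory read); otherwise it falls back to reading
 * the CNTXT_4 ring context register.
 */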
int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	unsigned long evt_id;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
	uint32_t val;
	struct gsi_evt_ctx *ctx;
	int res;
	int ee;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n",
			props, dev_hdl, evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_evt_ring_props(props)) {
		GSIERR("invalid params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
			sizeof(unsigned long) * BITS_PER_BYTE);
		if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
			GSIERR("failed to alloc event ID\n");
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		set_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		evt_id = props->evchid;
	}
	GSIDBG("Using %lu as virt evt id\n", evt_id);

	if (props->rp_update_addr != 0) {
		GSIDBG("Using DDR to read event RP for virt evt id: %lu\n",
			evt_id);
		props->gsi_read_event_ring_rp =
			gsi_read_event_ring_rp_ddr;
	} else {
		GSIDBG("Using CONTEXT reg to read event RP for virt evt id: %lu\n",
			evt_id);
		props->gsi_read_event_ring_rp =
			gsi_read_event_ring_rp_reg;
	}

	ctx = &gsi_ctx->evtr[evt_id];
	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->chan_ref_cnt, 0);
	ctx->props = *props;

	mutex_lock(&gsi_ctx->mlock);
	val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
		GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
		GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	ee = gsi_ctx->per.ee;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_CMD_OFFS(ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_id);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu allocation failed state=%u\n",
			evt_id, ctx->state);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_evt_ring(props, &ctx->ring);

	ctx->id = evt_id;
	*evt_ring_hdl = evt_id;
	atomic_inc(&gsi_ctx->num_evt_ring);
	if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	else if (props->intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	gsi_writel(1 << evt_id, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));

	/* enable the IEOB interrupt only for GPI event rings or MSI mode */
	if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
	    (props->intr != GSI_INTR_MSI))
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
	else
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_evt_ring);
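/*
 * Editor's note: a minimal sketch of allocating a GPI event ring, assuming
 * the ring memory was already DMA-allocated by the client. Field values
 * and the ring_dma/ring_va/my_* identifiers are illustrative assumptions.
 *
 *	struct gsi_evt_ring_props eprops;
 *	unsigned long evt_hdl;
 *
 *	memset(&eprops, 0, sizeof(eprops));
 *	eprops.intf = GSI_EVT_CHTYPE_GPI_EV;
 *	eprops.intr = GSI_INTR_IRQ;
 *	eprops.re_size = GSI_EVT_RING_RE_SIZE_16B;
 *	eprops.ring_len = 256 * 16;		// 256 x 16 B ring elements
 *	eprops.ring_base_addr = ring_dma;	// hypothetical DMA address
 *	eprops.ring_base_vaddr = ring_va;	// hypothetical kernel VA
 *	eprops.int_modt = 32;			// moderation, illustrative
 *	eprops.err_cb = my_evt_err_cb;		// hypothetical callback
 *	if (gsi_alloc_evt_ring(&eprops, dev_hdl, &evt_hdl))
 *		goto fail;
 */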
static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	gsi_writel(val.data.word1, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl,
		gsi_ctx->per.ee));
	gsi_writel(val.data.word2, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl,
		gsi_ctx->per.ee));
}

int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_evt_ring_scratch(evt_ring_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_evt_ring_scratch);

int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	uint32_t val;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev ||
	    evt_ring_hdl >= GSI_EVT_RING_MAX) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (atomic_read(&ctx->chan_ref_cnt)) {
		GSIERR("%d channels still using this event ring\n",
			atomic_read(&ctx->chan_ref_cnt));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
		GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
		GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
			ctx->state);
		/*
		 * The ring did not transition to the NOT_ALLOCATED state,
		 * which is an unexpected hardware state.
		 */
		GSI_ASSERT();
	}
	mutex_unlock(&gsi_ctx->mlock);

	if (!ctx->props.evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	}
	atomic_dec(&gsi_ctx->num_evt_ring);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_evt_ring);

int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
			db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);
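/*
 * Editor's note (assumption about intended use): the physical doorbell
 * addresses returned above appear to be meant for handing to an external
 * entity, e.g. an MHI host or a peripheral's DMA engine, so it can ring
 * the event ring doorbell directly instead of going through
 * gsi_ring_evt_ring_db() below.
 */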
int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
			gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;
	gsi_ring_evt_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_evt_ring_db);

int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
{
	struct gsi_chan_ctx *ctx;
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state != GSI_CHAN_STATE_STARTED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx->ring.wp_local = value;

	/* write MSB first */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
		gsi_ctx->per.ee));

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_ring_ch_ring_db);

int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	uint32_t val;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
		GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
		GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
			ctx->state);
		/*
		 * The hardware returned "GSI ring not allocated", which is
		 * unexpected and indicates hardware instability.
		 */
		GSI_ASSERT();
	}

	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
	gsi_init_evt_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);

	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	if (ctx->props.intf == GSI_EVT_CHTYPE_WDI2_EV)
		gsi_prime_evt_ring_wdi(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_evt_ring);

int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_evt_ring_cfg);

int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_evt_ring_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.exclusive != props->exclusive) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	mutex_unlock(&ctx->mlock);

	return gsi_reset_evt_ring(evt_ring_hdl);
}
EXPORT_SYMBOL(gsi_set_evt_ring_cfg);
static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props,
		unsigned int ee)
{
	uint32_t val;

	val = (((props->low_weight <<
		GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
		GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
		((props->max_prefetch <<
		GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
		GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
		((props->use_db_eng <<
		GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
		GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK));
	if (gsi_ctx->per.ver >= GSI_VER_2_0)
		val |= ((props->prefetch_mode <<
			GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT)
			& GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK);
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
}

static void gsi_program_chan_ctx_qos_v2_5(struct gsi_chan_props *props,
		unsigned int ee)
{
	uint32_t val;

	val = (((props->low_weight <<
		GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
		GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
		((props->max_prefetch <<
		GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
		GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
		((props->use_db_eng <<
		GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
		GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) |
		((props->prefetch_mode <<
		GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) &
		GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) |
		((props->empty_lvl_threshold <<
		GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) &
		GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
}

static void gsi_program_chan_ctx_qos_v2_9(struct gsi_chan_props *props,
		unsigned int ee)
{
	uint32_t val;

	val = (((props->low_weight <<
		GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
		GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
		((props->max_prefetch <<
		GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
		GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
		((props->use_db_eng <<
		GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
		GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) |
		((props->prefetch_mode <<
		GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) &
		GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) |
		((props->empty_lvl_threshold <<
		GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) &
		GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK) |
		((props->db_in_bytes <<
		GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_SHFT) &
		GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_V2_9_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
}

static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
		uint8_t erindex)
{
	uint32_t val;
	uint32_t prot;
	uint32_t prot_msb;

	switch (props->prot) {
	case GSI_CHAN_PROT_MHI:
	case GSI_CHAN_PROT_XHCI:
	case GSI_CHAN_PROT_GPI:
	case GSI_CHAN_PROT_XDCI:
	case GSI_CHAN_PROT_WDI2:
	case GSI_CHAN_PROT_WDI3:
	case GSI_CHAN_PROT_GCI:
	case GSI_CHAN_PROT_MHIP:
		prot_msb = 0;
		break;
	case GSI_CHAN_PROT_AQC:
	case GSI_CHAN_PROT_11AD:
		prot_msb = 1;
		break;
	default:
		GSIERR("Unsupported protocol %d\n", props->prot);
		WARN_ON(1);
		return;
	}
	prot = props->prot;

	val = ((prot <<
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT) &
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK);
	if (gsi_ctx->per.ver >= GSI_VER_2_5) {
		val |= ((prot_msb <<
			GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT) &
			GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK);
	}
	val |= (((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) &
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) |
		((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) &
		GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) |
		((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
		& GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee));

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		val = (props->ring_len &
			GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK)
			<< GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
		gsi_writel(val, gsi_ctx->base +
			GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_OFFS(
				props->ch_id, ee));
	} else {
		val = (props->ring_len &
			GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK)
			<< GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
		gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id,
				ee));
	}

	val = (props->ring_base_addr &
		GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee));

	val = ((props->ring_base_addr >> 32) &
		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee));

	if (gsi_ctx->per.ver >= GSI_VER_2_9)
		gsi_program_chan_ctx_qos_v2_9(props, ee);
	else if (gsi_ctx->per.ver >= GSI_VER_2_5)
		gsi_program_chan_ctx_qos_v2_5(props, ee);
	else
		gsi_program_chan_ctx_qos(props, ee);
}

static void gsi_init_chan_ring(struct gsi_chan_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) *
		ctx->elem_sz;
}

static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
	uint64_t ra;
	uint64_t last;

	if (props->ch_id >= gsi_ctx->max_ch) {
		GSIERR("ch_id %u invalid\n", props->ch_id);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
	     props->ring_len % 4) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_8B &&
	     props->ring_len % 8) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_16B &&
	     props->ring_len % 16) ||
	    (props->re_size == GSI_CHAN_RE_SIZE_32B &&
	     props->ring_len % 32)) {
		GSIERR("bad params ring_len %u not a multiple of re size %u\n",
			props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));
	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
			props->ring_base_addr,
			roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	last = props->ring_base_addr + props->ring_len - props->re_size;

	/*
	 * The 32 MSBs must be identical for every address in the ring,
	 * i.e. the ring must not cross a 4 GB address boundary.
	 */
	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
	    (last & 0xFFFFFFFF00000000ULL)) {
		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
			props->ring_base_addr,
			props->ring_len);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->prot == GSI_CHAN_PROT_GPI &&
	    !props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->prot);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
		GSIERR("invalid channel low weight %u\n", props->low_weight);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
		GSIERR("xfer callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}
int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	uint32_t val;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
	uint8_t erindex;
	struct gsi_user_data *user_data;
	size_t user_data_size;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n",
			props, dev_hdl, chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_channel_props(props)) {
		GSIERR("bad params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->evt_ring_hdl != ~0) {
		if (props->evt_ring_hdl >= gsi_ctx->max_ev) {
			GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl);
			return -GSI_STATUS_INVALID_PARAMS;
		}

		if (atomic_read(
			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
			gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
			gsi_ctx->evtr[props->evt_ring_hdl].chan->props.prot !=
			GSI_CHAN_PROT_GCI) {
			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
				props->evt_ring_hdl, chan_hdl);
			return -GSI_STATUS_UNSUPPORTED_OP;
		}
	}

	ctx = &gsi_ctx->chan[props->ch_id];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", props->ch_id);
		return -GSI_STATUS_NODEV;
	}
	memset(ctx, 0, sizeof(*ctx));

	/* a user_data array is not required for IPA-offloaded WDI channels */
	if (props->prot != GSI_CHAN_PROT_WDI2 &&
	    props->prot != GSI_CHAN_PROT_WDI3)
		user_data_size = props->ring_len / props->re_size;
	else
		user_data_size = props->re_size;

	/*
	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
	 * user_data needs to be large enough to accommodate those.
	 * TODO: increase user data size if GSI_VEID_MAX is not enough
	 */
	if (props->prot == GSI_CHAN_PROT_GCI)
		user_data_size += GSI_VEID_MAX;

	user_data = devm_kzalloc(gsi_ctx->dev,
		user_data_size * sizeof(*user_data),
		GFP_KERNEL);
	if (user_data == NULL) {
		GSIERR("context not allocated\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
	ctx->props = *props;

	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		mutex_lock(&gsi_ctx->mlock);
		ee = gsi_ctx->per.ee;
		gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
		val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
			((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
		gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_CMD_OFFS(ee));
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%u timed out\n", props->ch_id);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
			GSIERR("chan_hdl=%u allocation failed state=%d\n",
				props->ch_id, ctx->state);
			mutex_unlock(&gsi_ctx->mlock);
			devm_kfree(gsi_ctx->dev, user_data);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		ctx->state = GSI_CHAN_STATE_ALLOCATED;
		mutex_unlock(&gsi_ctx->mlock);
	}

	erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
		GSI_NO_EVT_ERINDEX;
	if (erindex != GSI_NO_EVT_ERINDEX && erindex >= GSI_EVT_RING_MAX) {
		GSIERR("invalid erindex %u\n", erindex);
		devm_kfree(gsi_ctx->dev, user_data);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (erindex < GSI_EVT_RING_MAX) {
		ctx->evtr = &gsi_ctx->evtr[erindex];
		if (props->prot != GSI_CHAN_PROT_GCI)
			atomic_inc(&ctx->evtr->chan_ref_cnt);
		if (props->prot != GSI_CHAN_PROT_GCI &&
		    ctx->evtr->props.exclusive &&
		    atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
			ctx->evtr->chan = ctx;
	}

	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_chan_ring(props, &ctx->ring);
	if (!props->max_re_expected)
		ctx->props.max_re_expected = ctx->ring.max_num_elem;
	ctx->user_data = user_data;
	*chan_hdl = props->ch_id;
	ctx->allocated = true;
	ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
	atomic_inc(&gsi_ctx->num_chan);

	if (props->prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = props->ch_id;
		gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_channel);
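/*
 * Editor's note: a minimal sketch of allocating a GPI channel and tying it
 * to a previously allocated event ring (evt_hdl as returned by
 * gsi_alloc_evt_ring()). Field values and the ch_ring_* / my_* identifiers
 * are illustrative assumptions only.
 *
 *	struct gsi_chan_props cprops;
 *	unsigned long chan_hdl;
 *
 *	memset(&cprops, 0, sizeof(cprops));
 *	cprops.prot = GSI_CHAN_PROT_GPI;
 *	cprops.dir = GSI_CHAN_DIR_FROM_GSI;	// assumed direction enum
 *	cprops.ch_id = 5;			// hypothetical channel id
 *	cprops.evt_ring_hdl = evt_hdl;
 *	cprops.re_size = GSI_CHAN_RE_SIZE_16B;
 *	cprops.ring_len = 512 * 16;
 *	cprops.ring_base_addr = ch_ring_dma;	// hypothetical DMA address
 *	cprops.ring_base_vaddr = ch_ring_va;	// hypothetical kernel VA
 *	cprops.xfer_cb = my_xfer_cb;		// hypothetical callbacks
 *	cprops.err_cb = my_chan_err_cb;
 *	if (gsi_alloc_channel(&cprops, dev_hdl, &chan_hdl))
 *		goto fail;
 */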
  2252. static int gsi_alloc_ap_channel(unsigned int chan_hdl)
  2253. {
  2254. struct gsi_chan_ctx *ctx;
  2255. uint32_t val;
  2256. int res;
  2257. int ee;
  2258. enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
  2259. if (!gsi_ctx) {
  2260. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  2261. return -GSI_STATUS_NODEV;
  2262. }
  2263. ctx = &gsi_ctx->chan[chan_hdl];
  2264. if (ctx->allocated) {
  2265. GSIERR("chan %d already allocated\n", chan_hdl);
  2266. return -GSI_STATUS_NODEV;
  2267. }
  2268. memset(ctx, 0, sizeof(*ctx));
  2269. mutex_init(&ctx->mlock);
  2270. init_completion(&ctx->compl);
  2271. atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
  2272. mutex_lock(&gsi_ctx->mlock);
  2273. ee = gsi_ctx->per.ee;
  2274. gsi_ctx->ch_dbg[chan_hdl].ch_allocate++;
  2275. val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
  2276. GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
  2277. ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
  2278. GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
  2279. gsi_writel(val, gsi_ctx->base +
  2280. GSI_EE_n_GSI_CH_CMD_OFFS(ee));
  2281. res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
  2282. if (res == 0) {
  2283. GSIERR("chan_hdl=%u timed out\n", chan_hdl);
  2284. mutex_unlock(&gsi_ctx->mlock);
  2285. return -GSI_STATUS_TIMED_OUT;
  2286. }
  2287. if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
  2288. GSIERR("chan_hdl=%u allocation failed state=%d\n",
  2289. chan_hdl, ctx->state);
  2290. mutex_unlock(&gsi_ctx->mlock);
  2291. return -GSI_STATUS_RES_ALLOC_FAILURE;
  2292. }
  2293. mutex_unlock(&gsi_ctx->mlock);
  2294. return GSI_STATUS_SUCCESS;
  2295. }
  2296. static void __gsi_write_channel_scratch(unsigned long chan_hdl,
  2297. union __packed gsi_channel_scratch val)
  2298. {
  2299. gsi_writel(val.data.word1, gsi_ctx->base +
  2300. GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
  2301. gsi_ctx->per.ee));
  2302. gsi_writel(val.data.word2, gsi_ctx->base +
  2303. GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
  2304. gsi_ctx->per.ee));
  2305. gsi_writel(val.data.word3, gsi_ctx->base +
  2306. GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
  2307. gsi_ctx->per.ee));
  2308. gsi_writel(val.data.word4, gsi_ctx->base +
  2309. GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
  2310. gsi_ctx->per.ee));
  2311. }
  2312. static void __gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
  2313. union __packed gsi_wdi3_channel_scratch2_reg val)
  2314. {
  2315. gsi_writel(val.data.word1, gsi_ctx->base +
  2316. GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
  2317. gsi_ctx->per.ee));
  2318. }
  2319. int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
  2320. union __packed gsi_wdi_channel_scratch3_reg val)
  2321. {
  2322. struct gsi_chan_ctx *ctx;
  2323. if (!gsi_ctx) {
  2324. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  2325. return -GSI_STATUS_NODEV;
  2326. }
  2327. if (chan_hdl >= gsi_ctx->max_ch) {
  2328. GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
  2329. return -GSI_STATUS_INVALID_PARAMS;
  2330. }
  2331. ctx = &gsi_ctx->chan[chan_hdl];
  2332. mutex_lock(&ctx->mlock);
  2333. ctx->scratch.wdi.endp_metadatareg_offset =
  2334. val.wdi.endp_metadatareg_offset;
  2335. ctx->scratch.wdi.qmap_id = val.wdi.qmap_id;
  2336. gsi_writel(val.data.word1, gsi_ctx->base +
  2337. GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
  2338. gsi_ctx->per.ee));
  2339. mutex_unlock(&ctx->mlock);
  2340. return GSI_STATUS_SUCCESS;
  2341. }
  2342. EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);
  2343. int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
  2344. union __packed gsi_wdi2_channel_scratch2_reg val)
  2345. {
  2346. struct gsi_chan_ctx *ctx;
  2347. if (!gsi_ctx) {
  2348. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  2349. return -GSI_STATUS_NODEV;
  2350. }
  2351. if (chan_hdl >= gsi_ctx->max_ch) {
  2352. GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
  2353. return -GSI_STATUS_INVALID_PARAMS;
  2354. }
  2355. ctx = &gsi_ctx->chan[chan_hdl];
  2356. mutex_lock(&ctx->mlock);
  2357. ctx->scratch.wdi2_new.endp_metadatareg_offset =
  2358. val.wdi.endp_metadatareg_offset;
  2359. ctx->scratch.wdi2_new.qmap_id = val.wdi.qmap_id;
  2360. val.wdi.update_ri_moderation_threshold =
  2361. ctx->scratch.wdi2_new.update_ri_moderation_threshold;
  2362. gsi_writel(val.data.word1, gsi_ctx->base +
  2363. GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
  2364. gsi_ctx->per.ee));
  2365. mutex_unlock(&ctx->mlock);
  2366. return GSI_STATUS_SUCCESS;
  2367. }
  2368. EXPORT_SYMBOL(gsi_write_channel_scratch2_reg);
  2369. static void __gsi_read_channel_scratch(unsigned long chan_hdl,
  2370. union __packed gsi_channel_scratch * val)
  2371. {
  2372. val->data.word1 = gsi_readl(gsi_ctx->base +
  2373. GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
  2374. gsi_ctx->per.ee));
  2375. val->data.word2 = gsi_readl(gsi_ctx->base +
  2376. GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
  2377. gsi_ctx->per.ee));
  2378. val->data.word3 = gsi_readl(gsi_ctx->base +
  2379. GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
  2380. gsi_ctx->per.ee));
  2381. val->data.word4 = gsi_readl(gsi_ctx->base +
  2382. GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
  2383. gsi_ctx->per.ee));
  2384. }
  2385. static void __gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
  2386. union __packed gsi_wdi3_channel_scratch2_reg * val)
  2387. {
  2388. val->data.word1 = gsi_readl(gsi_ctx->base +
  2389. GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
  2390. gsi_ctx->per.ee));
  2391. }
  2392. int gsi_write_channel_scratch(unsigned long chan_hdl,
  2393. union __packed gsi_channel_scratch val)
  2394. {
  2395. struct gsi_chan_ctx *ctx;
  2396. if (!gsi_ctx) {
  2397. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  2398. return -GSI_STATUS_NODEV;
  2399. }
  2400. if (chan_hdl >= gsi_ctx->max_ch) {
  2401. GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
  2402. return -GSI_STATUS_INVALID_PARAMS;
  2403. }
  2404. if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
  2405. gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
  2406. GSIERR("bad state %d\n",
  2407. gsi_ctx->chan[chan_hdl].state);
  2408. return -GSI_STATUS_UNSUPPORTED_OP;
  2409. }
	ctx = &gsi_ctx->chan[chan_hdl];
	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_channel_scratch);

int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	mutex_lock(&ctx->mlock);
	ctx->scratch.data.word3 = val.data.word1;
	__gsi_write_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_wdi3_channel_scratch2_reg);

int gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	mutex_lock(&ctx->mlock);
	__gsi_read_channel_scratch(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_read_channel_scratch);

int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
		union __packed gsi_wdi3_channel_scratch2_reg *val)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	mutex_lock(&ctx->mlock);
	__gsi_read_wdi3_channel_scratch2_reg(chan_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_read_wdi3_channel_scratch2_reg);

int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
		struct __packed gsi_mhi_channel_scratch mscr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	mutex_lock(&ctx->mlock);
	ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_update_mhi_channel_scratch);

int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
			db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
		GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_db_addr);

int gsi_pending_irq_type(void)
{
	int ee = gsi_ctx->per.ee;

	return gsi_readl(gsi_ctx->base + GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee));
}
EXPORT_SYMBOL(gsi_pending_irq_type);
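
/*
 * gsi_start_channel - issue a GSI START command for @chan_hdl and wait for
 * the channel to reach STARTED (or FLOW_CONTROL) state. On success the
 * doorbell MSB is primed from the local write pointer. Runs under the
 * global GSI mutex; an unexpected resulting state is treated as fatal.
 */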
int gsi_start_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_START;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
		ctx->state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
	if (ctx->evtr &&
		ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
		val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET_OFFS %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));

	GSIDBG("GSI Channel Start, waiting for completion\n");
	gsi_channel_state_change_wait(chan_hdl, ctx,
		GSI_START_CMD_TIMEOUT_MS, op);

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_FLOW_CONTROL) {
		/* Hardware returned an unexpected channel state. */
		GSIERR("chan=%lu timed out, unexpected state=%u\n",
			chan_hdl, ctx->state);
		GSI_ASSERT();
	}

	GSIDBG("GSI Channel=%lu Start success\n", chan_hdl);

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
			gsi_ctx->per.ee));

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_channel);
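
/*
 * gsi_stop_channel - issue a GSI STOP command for @chan_hdl and wait for
 * the channel to reach STOPPED state. Returns -GSI_STATUS_AGAIN when the
 * stop is still in progress and the caller should retry.
 */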
int gsi_stop_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
		ctx->state != GSI_CHAN_STATE_ERROR) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	/* check if INTSET is in IRQ mode for GPI channel */
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
	if (ctx->evtr &&
		ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
		val != GSI_INTR_IRQ) {
		GSIERR("GSI_EE_n_CNTXT_INTSET_OFFS %d\n", val);
		BUG();
	}

	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));

	GSIDBG("GSI Channel Stop, waiting for completion\n");
	gsi_channel_state_change_wait(chan_hdl, ctx,
		GSI_STOP_CMD_TIMEOUT_MS, op);

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		BUG();
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_channel);

int gsi_stop_db_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl,
		msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_db_channel);
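
/*
 * gsi_reset_channel - issue a GSI RESET command for @chan_hdl, reprogram
 * the channel context and ring, and restore the saved scratch area. On
 * GSI versions earlier than 2.0, producer channels are reset twice as a
 * hardware workaround.
 */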
int gsi_reset_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;
	bool reset_done = false;
	uint32_t retry_cnt = 0;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	/*
	 * In the WDI3 case, if SAP is enabled but no client is connected,
	 * the channel stays in ALLOCATED state. When SAP is disabled,
	 * gsi_reset_channel() is called and the reset must proceed.
	 */
	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);

reset:
	reinit_completion(&ctx->compl);
	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

revrfy_chnlstate:
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
			ctx->state);
		/*
		 * The GSI register state and the driver's channel context
		 * can briefly be out of sync; wait ~1 ms for them to
		 * converge and re-check.
		 */
		retry_cnt++;
		if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
			usleep_range(GSI_RESET_WA_MIN_SLEEP,
				GSI_RESET_WA_MAX_SLEEP);
			goto revrfy_chnlstate;
		}
		/* Hardware is still reporting an unexpected state. */
		GSI_ASSERT();
	}

	/* Hardware issue fixed from GSI 2.0 and no need for the WA */
	if (gsi_ctx->per.ver >= GSI_VER_2_0)
		reset_done = true;

	/* workaround: reset GSI producers again */
	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
		reset_done = true;
		goto reset;
	}

	if (ctx->props.cleanup_cb)
		gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);

	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_channel);
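
/*
 * gsi_dealloc_channel - issue a GSI DE_ALLOC command for @chan_hdl and
 * release the per-channel bookkeeping. On GSI_VER_2_2, where the command
 * is not supported, the channel context is simply marked NOT_ALLOCATED.
 */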
int gsi_dealloc_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* Channel deallocation is not supported on GSI_VER_2_2 */
	if (gsi_ctx->per.ver != GSI_VER_2_2) {
		mutex_lock(&gsi_ctx->mlock);
		reinit_completion(&ctx->compl);

		gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
		val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
				GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
			((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
				GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
		gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
		if (res == 0) {
			GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_TIMED_OUT;
		}
		if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
			GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
				ctx->state);
			/* Hardware returned incorrect value */
			GSI_ASSERT();
		}
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		mutex_lock(&gsi_ctx->mlock);
		GSIDBG("In GSI_VER_2_2 channel deallocation not supported\n");
		ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED;
		GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl,
			ctx->state);
		mutex_unlock(&gsi_ctx->mlock);
	}
	devm_kfree(gsi_ctx->dev, ctx->user_data);
	ctx->allocated = false;
	if (ctx->evtr && (ctx->props.prot != GSI_CHAN_PROT_GCI))
		atomic_dec(&ctx->evtr->chan_ref_cnt);
	atomic_dec(&gsi_ctx->num_chan);

	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
		gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
		gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
	}

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_channel);

void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
{
	unsigned long now = jiffies_to_msecs(jiffies);
	unsigned long elapsed;

	if (used == 0) {
		elapsed = now - ctx->stats.dp.last_timestamp;
		if (ctx->stats.dp.empty_time < elapsed)
			ctx->stats.dp.empty_time = elapsed;
	}

	if (used <= ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_lo;
	else if (used <= 2 * ctx->props.max_re_expected / 3)
		++ctx->stats.dp.ch_below_hi;
	else
		++ctx->stats.dp.ch_above_hi;
	ctx->stats.dp.last_timestamp = now;
}
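
/*
 * __gsi_query_channel_free_re - compute the number of free ring elements
 * between the (refreshed) read pointer and the local write pointer.
 * Called with the relevant ring spinlock held (see gsi_queue_xfer below).
 */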
static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
		uint16_t *num_free_re)
{
	uint16_t start;
	uint16_t end;
	uint64_t rp;
	int ee = gsi_ctx->per.ee;
	uint16_t used;

	WARN_ON(ctx->props.prot != GSI_CHAN_PROT_GPI);

	if (!ctx->evtr) {
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
		ctx->ring.rp = rp;
	} else {
		rp = ctx->ring.rp_local;
	}

	start = gsi_find_idx_from_addr(&ctx->ring, rp);
	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);

	if (end >= start)
		used = end - start;
	else
		used = ctx->ring.max_num_elem + 1 - (start - end);

	*num_free_re = ctx->ring.max_num_elem - used;
}

int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !info) {
		GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->evtr) {
		slock = &ctx->evtr->ring.slock;
		info->evt_valid = true;
	} else {
		slock = &ctx->ring.slock;
		info->evt_valid = false;
	}

	spin_lock_irqsave(slock, flags);

	ee = gsi_ctx->per.ee;
	rp = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
	ctx->ring.rp = rp;
	info->rp = rp;

	wp = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
	ctx->ring.wp = wp;
	info->wp = wp;

	if (info->evt_valid) {
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
		rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee)))
			<< 32;
		info->evt_rp = rp;

		wp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
		wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee)))
			<< 32;
		info->evt_wp = wp;
	}

	spin_unlock_irqrestore(slock, flags);

	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
		chan_hdl, info->rp, info->wp,
		info->evt_valid, info->evt_rp, info->evt_wp);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_channel_info);

int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	uint64_t rp_local;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
		GSIERR("bad params chan_hdl=%lu is_empty=%pK\n",
			chan_hdl, is_empty);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr) {
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
		rp |= ctx->evtr->ring.rp & 0xFFFFFFFF00000000;
		ctx->evtr->ring.rp = rp;

		wp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
		wp |= ctx->evtr->ring.wp & 0xFFFFFFFF00000000;
		ctx->evtr->ring.wp = wp;

		rp_local = ctx->evtr->ring.rp_local;
	} else {
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
		ctx->ring.rp = rp;

		wp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
		wp |= ctx->ring.wp & 0xFFFFFFFF00000000;
		ctx->ring.wp = wp;

		rp_local = ctx->ring.rp_local;
	}

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		*is_empty = (rp_local == rp);
	else
		*is_empty = (wp == rp);

	spin_unlock_irqrestore(slock, flags);

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr)
		GSIDBG("ch=%ld ev=%d RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, ctx->evtr->id, rp, wp, rp_local);
	else
		GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, rp, wp, rp_local);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_is_channel_empty);
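
/*
 * __gsi_get_gci_cookie - return a free index into the channel's user_data
 * array to serve as a GCI TRE cookie. Prefers @idx itself, then the
 * "escape buffer" slots beyond the ring, then any free slot; a completely
 * full array indicates a stall and is fatal.
 */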
int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
{
	int i;
	int end;

	if (!ctx->user_data[idx].valid) {
		ctx->user_data[idx].valid = true;
		return idx;
	}

	/*
	 * At this point we need to find an "escape buffer" for the cookie,
	 * as the userdata in this spot is in use. This happens if the TRE
	 * at idx is not completed yet and it is getting reused by a new TRE.
	 */
	ctx->stats.userdata_in_use++;
	end = ctx->ring.max_num_elem + 1;
	for (i = 0; i < GSI_VEID_MAX; i++) {
		if (!ctx->user_data[end + i].valid) {
			ctx->user_data[end + i].valid = true;
			return end + i;
		}
	}

	/* Go over original userdata when escape buffer is full (costly) */
	GSIDBG("escape buffer is full\n");
	for (i = 0; i < end; i++) {
		if (!ctx->user_data[i].valid) {
			ctx->user_data[i].valid = true;
			return i;
		}
	}

	/* Everything is full (possibly a stall) */
	GSIERR("both userdata array and escape buffer are full\n");
	BUG();
	return 0xFFFF;
}

int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
		struct gsi_xfer_elem *xfer)
{
	struct gsi_gci_tre gci_tre;
	struct gsi_gci_tre *tre_gci_ptr;
	uint16_t idx;

	memset(&gci_tre, 0, sizeof(gci_tre));
	if (xfer->addr & 0xFFFFFF0000000000) {
		GSIERR("chan_hdl=%u addr too large=%llx\n",
			ctx->props.ch_id, xfer->addr);
		return -EINVAL;
	}

	if (xfer->type != GSI_XFER_ELEM_DATA) {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_gci_ptr = (struct gsi_gci_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	gci_tre.buffer_ptr = xfer->addr;
	gci_tre.buf_len = xfer->len;
	gci_tre.re_type = GSI_RE_COAL;
	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
		return -EPERM;

	/* write the TRE to ring */
	*tre_gci_ptr = gci_tre;
	ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;

	return 0;
}
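
/*
 * __gsi_populate_tre - build a plain GPI TRE from @xfer and write it at
 * the local write pointer, recording the caller's user data at the same
 * ring index.
 */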
int __gsi_populate_tre(struct gsi_chan_ctx *ctx,
		struct gsi_xfer_elem *xfer)
{
	struct gsi_tre tre;
	struct gsi_tre *tre_ptr;
	uint16_t idx;

	memset(&tre, 0, sizeof(tre));
	tre.buffer_ptr = xfer->addr;
	tre.buf_len = xfer->len;
	if (xfer->type == GSI_XFER_ELEM_DATA) {
		tre.re_type = GSI_RE_XFER;
	} else if (xfer->type == GSI_XFER_ELEM_IMME_CMD) {
		tre.re_type = GSI_RE_IMMD_CMD;
	} else if (xfer->type == GSI_XFER_ELEM_NOP) {
		tre.re_type = GSI_RE_NOP;
	} else {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
	}

	tre.bei = (xfer->flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
	tre.ieot = (xfer->flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
	tre.ieob = (xfer->flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
	tre.chain = (xfer->flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
	tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
		idx * ctx->ring.elem_sz);

	/* write the TRE to ring */
	*tre_ptr = tre;
	ctx->user_data[idx].valid = true;
	ctx->user_data[idx].p = xfer->xfer_user_data;

	return 0;
}
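
/*
 * gsi_queue_xfer - queue @num_xfers transfer elements on @chan_hdl and
 * optionally ring the channel doorbell. If any element fails to populate,
 * the local write pointer is rolled back and the whole batch is rejected.
 *
 * Illustrative (hypothetical) GPI usage; field values are examples only:
 *
 *	struct gsi_xfer_elem xfer = {
 *		.addr = dma_addr,	// device-visible buffer address
 *		.len = len,
 *		.type = GSI_XFER_ELEM_DATA,
 *		.flags = GSI_XFER_FLAG_EOT,
 *		.xfer_user_data = priv,
 *	};
 *
 *	ret = gsi_queue_xfer(chan_hdl, 1, &xfer, true);
 */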
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	struct gsi_chan_ctx *ctx;
	uint16_t free;
	uint64_t wp_rollback;
	int i;
	spinlock_t *slock;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
			chan_hdl, num_xfers, xfer);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (unlikely(gsi_ctx->chan[chan_hdl].state
			== GSI_CHAN_STATE_NOT_ALLOCATED)) {
		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	/* allow only ring doorbell */
	if (!num_xfers)
		goto ring_doorbell;

	/*
	 * For GCI channels the responsibility is on the caller to make sure
	 * there is enough room in the TRE ring.
	 */
	if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
		__gsi_query_channel_free_re(ctx, &free);
		if (num_xfers > free) {
			GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
				chan_hdl, num_xfers, free);
			spin_unlock_irqrestore(slock, flags);
			return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
		}
	}

	wp_rollback = ctx->ring.wp_local;
	for (i = 0; i < num_xfers; i++) {
		if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
			if (__gsi_populate_gci_tre(ctx, &xfer[i]))
				break;
		} else {
			if (__gsi_populate_tre(ctx, &xfer[i]))
				break;
		}
		gsi_incr_ring_wp(&ctx->ring);
	}

	if (i != num_xfers) {
		/* reject all the xfers */
		ctx->ring.wp_local = wp_rollback;
		spin_unlock_irqrestore(slock, flags);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx->stats.queued += num_xfers;

ring_doorbell:
	if (ring_db) {
		/* ensure TRE is set before ringing doorbell */
		wmb();
		gsi_ring_chan_doorbell(ctx);
	}

	spin_unlock_irqrestore(slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_queue_xfer);

int gsi_start_xfer(unsigned long chan_hdl)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->ring.wp == ctx->ring.wp_local)
		return GSI_STATUS_SUCCESS;

	gsi_ring_chan_doorbell(ctx);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_start_xfer);

int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify)
{
	int unused_var;

	return gsi_poll_n_channel(chan_hdl, notify, 1, &unused_var);
}
EXPORT_SYMBOL(gsi_poll_channel);
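
/*
 * gsi_poll_n_channel - poll up to @expected_num completions from the event
 * ring tied to @chan_hdl. The event ring read pointer is refreshed twice
 * (the second time after clearing the IEOB interrupt) to close a small
 * race window; returns GSI_STATUS_POLL_EMPTY when nothing is pending.
 */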
int gsi_poll_n_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify,
		int expected_num, int *actual_num)
{
	struct gsi_chan_ctx *ctx;
	uint64_t rp;
	int ee;
	int i;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !notify ||
		!actual_num || expected_num <= 0) {
		GSIERR("bad params chan_hdl=%lu notify=%pK\n",
			chan_hdl, notify);
		GSIERR("actual_num=%pK expected_num=%d\n",
			actual_num, expected_num);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr) {
		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
		/* update rp to see if we have anything new to process */
		rp = ctx->evtr->props.gsi_read_event_ring_rp(
			&ctx->evtr->props, ctx->evtr->id, ee);
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
		ctx->evtr->ring.rp = rp;

		/* read gsi event ring rp again if last read is empty */
		if (rp == ctx->evtr->ring.rp_local) {
			/* event ring is empty */
			gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
			/* do another read to close a small window */
			__iowmb();
			rp = ctx->evtr->props.gsi_read_event_ring_rp(
				&ctx->evtr->props, ctx->evtr->id, ee);
			rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
			ctx->evtr->ring.rp = rp;
			if (rp == ctx->evtr->ring.rp_local) {
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock,
					flags);
				ctx->stats.poll_empty++;
				return GSI_STATUS_POLL_EMPTY;
			}
		}
	}

	*actual_num = gsi_get_complete_num(&ctx->evtr->ring,
		ctx->evtr->ring.rp_local, ctx->evtr->ring.rp);
	if (*actual_num > expected_num)
		*actual_num = expected_num;
	for (i = 0; i < *actual_num; i++)
		gsi_process_evt_re(ctx->evtr, notify + i, false);
	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
	ctx->stats.poll_ok++;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_poll_n_channel);
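
/*
 * gsi_config_channel_mode - switch @chan_hdl between callback (IRQ) and
 * poll mode by masking/unmasking its IEOB interrupt. On GSI 2.2/2.5 a
 * workaround falls back to poll mode and returns -GSI_STATUS_PENDING_IRQ
 * if an interrupt was already pending when callback mode was requested.
 */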
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
{
	struct gsi_chan_ctx *ctx, *coal_ctx;
	enum gsi_chan_mode curr;
	unsigned long flags;
	enum gsi_chan_mode chan_mode;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
		GSIERR("cannot configure mode on chan_hdl=%lu\n",
			chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (atomic_read(&ctx->poll_mode))
		curr = GSI_CHAN_MODE_POLL;
	else
		curr = GSI_CHAN_MODE_CALLBACK;

	if (mode == curr) {
		GSIDBG("already in requested mode %u chan_hdl=%lu\n",
			curr, chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	if (curr == GSI_CHAN_MODE_CALLBACK &&
		mode == GSI_CHAN_MODE_POLL) {
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
		atomic_set(&ctx->poll_mode, mode);
		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
			atomic_set(&ctx->evtr->chan->poll_mode, mode);
		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
			if (coal_ctx != NULL)
				atomic_set(&coal_ctx->poll_mode, mode);
		}
		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
			ctx->evtr->id, mode);
		ctx->stats.callback_to_poll++;
	}

	if (curr == GSI_CHAN_MODE_POLL &&
		mode == GSI_CHAN_MODE_CALLBACK) {
		atomic_set(&ctx->poll_mode, mode);
		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
			atomic_set(&ctx->evtr->chan->poll_mode, mode);
		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
			if (coal_ctx != NULL)
				atomic_set(&coal_ctx->poll_mode, mode);
		}
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
			ctx->evtr->id, mode);

		/*
		 * In GSI 2.2 and 2.5 there is a limitation that can lead
		 * to losing an interrupt. For these versions an explicit
		 * check is needed after enabling the interrupt.
		 */
		if ((gsi_ctx->per.ver == GSI_VER_2_2 ||
			gsi_ctx->per.ver == GSI_VER_2_5) &&
			!gsi_ctx->per.skip_ieob_mask_wa) {
			u32 src = gsi_readl(gsi_ctx->base +
				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(
					gsi_ctx->per.ee));
			if (src & (1 << ctx->evtr->id)) {
				__gsi_config_ieob_irq(
					gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
				gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
					GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(
						gsi_ctx->per.ee));
				spin_unlock_irqrestore(&gsi_ctx->slock, flags);
				spin_lock_irqsave(&ctx->evtr->ring.slock,
					flags);
				chan_mode = atomic_xchg(&ctx->poll_mode,
					GSI_CHAN_MODE_POLL);
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock, flags);
				ctx->stats.poll_pending_irq++;
				GSIDBG("IEOB WA pnd cnt = %ld prvmode = %d\n",
					ctx->stats.poll_pending_irq,
					chan_mode);
				if (chan_mode == GSI_CHAN_MODE_POLL)
					return GSI_STATUS_SUCCESS;
				else
					return -GSI_STATUS_PENDING_IRQ;
			}
		}
		ctx->stats.poll_to_callback++;
	}
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_config_channel_mode);

int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_channel_cfg);

int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_channel_props(props)) {
		GSIERR("bad params props=%pK\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.ch_id != props->ch_id ||
		ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_set_channel_cfg);

static void gsi_configure_ieps(void *base, enum gsi_ver ver)
{
	void __iomem *gsi_base = base;

	gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
	gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
	gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
	gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
	gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
	gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
	gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS);
	gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
	gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
	gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
	gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
	gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
	gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
	gsi_writel(14, gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS);
	gsi_writel(15, gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS);
	gsi_writel(16, gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS);

	if (ver >= GSI_VER_2_5)
		gsi_writel(17,
			gsi_base + GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS);

	if (ver >= GSI_VER_2_11)
		gsi_writel(18, gsi_base + GSI_GSI_IRAM_PTR_MSI_DB_OFFS);
}

static void gsi_configure_bck_prs_matrix(void *base)
{
	void __iomem *gsi_base = (void __iomem *) base;

	/*
	 * For now, these are default values. In the future, GSI FW image will
	 * produce optimized back-pressure values based on the FW image.
	 */
	gsi_writel(0xfffffffe,
		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffefff,
		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS);
	gsi_writel(0x00000000,
		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
	gsi_writel(0x00000000,
		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
	gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS);
	gsi_writel(0xff03ffff,
		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS);
}

int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->base) {
		GSIERR("access to GSI HW has not been mapped\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	gsi_writel(0, gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS);
	gsi_writel(per_base_addr,
		gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS);
	gsi_configure_bck_prs_matrix((void *)gsi_ctx->base);
	gsi_configure_ieps(gsi_ctx->base, ver);

	return 0;
}
EXPORT_SYMBOL(gsi_configure_regs);
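
/*
 * gsi_enable_fw - map the GSI register space and enable the GSI/MCS
 * blocks with the doubled MCS clock; on GSI >= 2.5 the sleep-clock
 * divider is also programmed. The mapping is released before returning.
 */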
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	void __iomem *gsi_base;
	uint32_t value;

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
	if (!gsi_base) {
		GSIERR("ioremap failed\n");
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	/* Enable the MCS and set to x2 clocks */
	if (ver >= GSI_VER_1_2) {
		value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
			GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
		gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);

		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
				GSI_GSI_CFG_GSI_ENABLE_BMSK) |
			((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
				GSI_GSI_CFG_MCS_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
				GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
				GSI_GSI_CFG_UC_IS_MCS_BMSK) |
			((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
				GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
			((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
				GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
	} else {
		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
				GSI_GSI_CFG_GSI_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
				GSI_GSI_CFG_MCS_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
				GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
				GSI_GSI_CFG_UC_IS_MCS_BMSK));
	}

	/* GSI frequency is peripheral frequency divided by 3 (2+1) */
	if (ver >= GSI_VER_2_5)
		value |= ((2 << GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT) &
			GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK);
	gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);

	iounmap(gsi_base);

	return 0;
}
EXPORT_SYMBOL(gsi_enable_fw);

void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
		unsigned long *size, enum gsi_ver ver)
{
	unsigned long maxn;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	switch (ver) {
	case GSI_VER_1_0:
	case GSI_VER_1_2:
	case GSI_VER_1_3:
		maxn = GSI_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_0:
		maxn = GSI_V2_0_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_2:
		maxn = GSI_V2_2_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_5:
		maxn = GSI_V2_5_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_7:
		maxn = GSI_V2_7_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_9:
		maxn = GSI_V2_9_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_2_11:
		maxn = GSI_V2_11_GSI_INST_RAM_n_MAXn;
		break;
	case GSI_VER_ERR:
	case GSI_VER_MAX:
	default:
		GSIERR("GSI version is not supported %d\n", ver);
		WARN_ON(1);
		return;
	}

	if (size)
		*size = GSI_GSI_INST_RAM_n_WORD_SZ * (maxn + 1);

	if (base_offset) {
		if (ver < GSI_VER_2_5)
			*base_offset = GSI_GSI_INST_RAM_n_OFFS(0);
		else
			*base_offset = GSI_V2_5_GSI_INST_RAM_n_OFFS(0);
	}
}
EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
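
/*
 * gsi_halt_channel_ee - issue the generic HALT_CHANNEL command for a
 * channel owned by another EE and report the firmware return code via
 * @code. Returns -GSI_STATUS_AGAIN when firmware asks for a retry.
 */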
int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
	uint32_t val;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK, ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));

	gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
		GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_RETRY) {
		GSIDBG("chan_idx=%u ee=%u busy try again\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_RETRY;
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK, 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_halt_channel_ee);

int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL;
	struct gsi_chan_ctx *ctx;
	uint32_t val;
	int res;

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (ee == 0)
		return gsi_alloc_ap_channel(chan_idx);

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK, ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));

	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
		GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) {
		GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}
	if (ee == 0) {
		ctx = &gsi_ctx->chan[chan_idx];
		gsi_ctx->ch_dbg[chan_idx].ch_allocate++;
	}

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK, 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_alloc_channel_ee);

int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
		int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL;
	uint32_t val;
	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK, ~0);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));

	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
		GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
			GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));

	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		msecs_to_jiffies(GSI_CMD_TIMEOUT));
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
		GSIDBG("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
			GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE ||
		gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
			GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX) {
		GSIERR("chan_idx=%u ee=%u not in correct state\n",
			chan_idx, ee);
		GSI_ASSERT();
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	/* Read the current channel state */
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_idx, ee));
	curr_state = (val &
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
	if (curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
		GSIDBG("ch %u state updated to %u\n", chan_idx, curr_state);
		res = GSI_STATUS_SUCCESS;
	} else {
		GSIERR("ch %u state updated to %u incorrect state\n",
			chan_idx, curr_state);
		res = -GSI_STATUS_ERROR;
	}
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;

free_lock:
	__gsi_config_glob_irq(gsi_ctx->per.ee,
		GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK, 0);
	mutex_unlock(&gsi_ctx->mlock);

	return res;
}
EXPORT_SYMBOL(gsi_enable_flow_control_ee);
  3902. int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
  3903. {
  3904. if (!gsi_ctx) {
  3905. pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
  3906. return -GSI_STATUS_NODEV;
  3907. }
  3908. if (!gsi_ctx->base) {
  3909. GSIERR("access to GSI HW has not been mapped\n");
  3910. return -GSI_STATUS_INVALID_PARAMS;
  3911. }
  3912. gsi_writel(per_ep_index,
  3913. gsi_ctx->base +
  3914. GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(chan_num, ee));
  3915. return 0;
  3916. }
  3917. EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep);
void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
	uint32_t db_addr_low, uint32_t db_addr_high)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
		gsi_writel(db_addr_low, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_ring_hdl,
			gsi_ctx->per.ee));
		gsi_writel(db_addr_high, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_ring_hdl,
			gsi_ctx->per.ee));
	} else {
		gsi_writel(db_addr_low, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_ring_hdl,
			gsi_ctx->per.ee));
		gsi_writel(db_addr_high, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_ring_hdl,
			gsi_ctx->per.ee));
	}
}
EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
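
/**
 * gsi_wdi3_dump_register() - dump the channel context, ring fetch pointer,
 * QOS and scratch registers of a WDI3 channel to the GSI debug log.
 * @chan_hdl: handle of the channel whose registers are dumped
 */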
void gsi_wdi3_dump_register(unsigned long chan_hdl)
{
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return;
	}

	GSIDBG("reg dump ch id %lu\n", chan_hdl);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_QOS_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_QOS_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS 0x%x\n", val);
	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS 0x%x\n", val);
}
EXPORT_SYMBOL(gsi_wdi3_dump_register);
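
/**
 * __gsi_update_mhi_channel_scratch() - read-modify-write the MHI fields of
 * a channel's scratch area.
 * @chan_hdl: handle of the channel whose scratch is updated
 * @mscr: new MHI scratch values to apply
 *
 * Reads all four scratch words, overwrites only the MHI-owned fields and
 * writes the words back, so the sequencer-specific fields are preserved.
 * The sequence is not atomic; callers rely on those fields staying
 * unchanged while it runs. Returns the resulting scratch content.
 */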
static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr)
{
	union __packed gsi_channel_scratch scr;

	/*
	 * The sequence below is not atomic. The assumption is that the
	 * sequencer-specific fields will remain unchanged across it.
	 */

	/* READ */
	scr.data.word1 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	scr.data.word2 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	scr.data.word3 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	scr.data.word4 = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
		gsi_ctx->per.ee));

	/* UPDATE */
	scr.mhi.mhi_host_wp_addr = mscr.mhi_host_wp_addr;
	scr.mhi.assert_bit40 = mscr.assert_bit40;
	scr.mhi.polling_configuration = mscr.polling_configuration;
	scr.mhi.burst_mode_enabled = mscr.burst_mode_enabled;
	scr.mhi.polling_mode = mscr.polling_mode;
	scr.mhi.oob_mod_threshold = mscr.oob_mod_threshold;

	if (gsi_ctx->per.ver < GSI_VER_2_5) {
		scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre;
		scr.mhi.outstanding_threshold = mscr.outstanding_threshold;
	}

	/* WRITE */
	gsi_writel(scr.data.word1, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	gsi_writel(scr.data.word2, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	gsi_writel(scr.data.word3, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
		gsi_ctx->per.ee));
	gsi_writel(scr.data.word4, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
		gsi_ctx->per.ee));

	return scr;
}
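
/**
 * msm_gsi_probe() - platform driver probe. Allocates the global GSI
 * context, creates the IPC log buffer, initializes the generic EE command
 * completion and sets up debugfs.
 * @pdev: the GSI platform device
 *
 * Only failure to allocate the context is fatal; a missing IPC log buffer
 * is logged and otherwise ignored.
 */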
static int msm_gsi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pr_debug("gsi_probe\n");
	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
	if (!gsi_ctx) {
		dev_err(dev, "failed to allocate gsi context\n");
		return -ENOMEM;
	}

	gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
		"gsi", 0);
	if (gsi_ctx->ipc_logbuf == NULL)
		GSIERR("failed to create IPC log, continue...\n");

	gsi_ctx->dev = dev;
	init_completion(&gsi_ctx->gen_ee_cmd_compl);
	gsi_debugfs_init();

	return 0;
}
static struct platform_driver msm_gsi_driver = {
	.probe = msm_gsi_probe,
	.driver = {
		.name = "gsi",
		.of_match_table = msm_gsi_match,
	},
};

static struct platform_device *pdev;
/**
 * Module Init.
 */
static int __init gsi_init(void)
{
	int ret;

	pr_debug("%s\n", __func__);
	ret = platform_driver_register(&msm_gsi_driver);
	if (ret < 0)
		goto out;

	if (running_emulation) {
		/*
		 * On emulation targets, register the platform device
		 * manually as well.
		 */
		pdev = platform_device_register_simple("gsi", -1, NULL, 0);
		if (IS_ERR(pdev)) {
			ret = PTR_ERR(pdev);
			platform_driver_unregister(&msm_gsi_driver);
			goto out;
		}
	}

out:
	return ret;
}
arch_initcall(gsi_init);
/*
 * Module exit.
 */
static void __exit gsi_exit(void)
{
	if (running_emulation && pdev)
		platform_device_unregister(pdev);
	platform_driver_unregister(&msm_gsi_driver);
}
module_exit(gsi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Software Interface (GSI)");