- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
- #include <linux/of.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/log2.h>
- #include <linux/module.h>
- #include <linux/msm_gsi.h>
- #include <linux/platform_device.h>
- #include <linux/delay.h>
- #include "gsi.h"
- #include "gsi_reg.h"
- #include "gsi_emulation.h"
- #define GSI_CMD_TIMEOUT (5*HZ)
- #define GSI_START_CMD_TIMEOUT_MS 1000
- #define GSI_CMD_POLL_CNT 5
- #define GSI_STOP_CMD_TIMEOUT_MS 200
- #define GSI_MAX_CH_LOW_WEIGHT 15
- #define GSI_IRQ_STORM_THR 5
- #define GSI_STOP_CMD_POLL_CNT 4
- #define GSI_STOP_IN_PROC_CMD_POLL_CNT 2
- #define GSI_RESET_WA_MIN_SLEEP 1000
- #define GSI_RESET_WA_MAX_SLEEP 2000
- #define GSI_CHNL_STATE_MAX_RETRYCNT 10
- #define GSI_STTS_REG_BITS 32
- #ifndef CONFIG_DEBUG_FS
- void gsi_debugfs_init(void)
- {
- }
- #endif
- static const struct of_device_id msm_gsi_match[] = {
- { .compatible = "qcom,msm_gsi", },
- { },
- };
- #if defined(CONFIG_IPA_EMULATION)
- static bool running_emulation = true;
- #else
- static bool running_emulation;
- #endif
- struct gsi_ctx *gsi_ctx;
- static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
- unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr);
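- /*
- * The __gsi_config_*_irq() helpers below do a read-modify-write on the
- * corresponding per-EE interrupt mask/enable register: only the bits
- * selected by mask are changed, and they take the matching bits of val.
- */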
- static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
- {
- uint32_t curr;
- curr = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
- gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
- GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
- }
- static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
- {
- uint32_t curr;
- curr = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
- gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
- }
- static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
- {
- uint32_t curr;
- curr = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
- gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
- }
- static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
- {
- uint32_t curr;
- curr = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
- gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
- GSIDBG("current IEO_IRQ_MSK: 0x%x, change to: 0x%x\n",
- curr, ((curr & ~mask) | (val & mask)));
- }
- static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
- {
- uint32_t curr;
- curr = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
- gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
- GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
- }
- static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
- {
- uint32_t curr;
- curr = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
- gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
- GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
- }
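- /*
- * gsi_channel_state_change_wait() - wait for a channel command to take
- * effect, either via the command-completion interrupt or, if that is
- * late or missing, by polling the channel state register.
- */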
- static void gsi_channel_state_change_wait(unsigned long chan_hdl,
- struct gsi_chan_ctx *ctx,
- uint32_t tm, enum gsi_ch_cmd_opcode op)
- {
- int poll_cnt;
- int gsi_pending_intr;
- int res;
- uint32_t type;
- uint32_t val;
- int ee = gsi_ctx->per.ee;
- enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
- int stop_in_proc_retry = 0;
- int stop_retry = 0;
- /*
- * Poll the GSI channel for a total duration of
- * tm * GSI_CMD_POLL_CNT. Polling the GSI state here improves
- * the debuggability of the GSI HW state.
- */
- for (poll_cnt = 0;
- poll_cnt < GSI_CMD_POLL_CNT;
- poll_cnt++) {
- res = wait_for_completion_timeout(&ctx->compl,
- msecs_to_jiffies(tm));
- /* Interrupt received, return */
- if (res != 0)
- return;
- type = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(gsi_ctx->per.ee));
- gsi_pending_intr = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
- /* Update the channel state only if the interrupt was raised
- * on this particular channel and the global channel-control
- * interrupt type is also set.
- */
- if ((type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK) &&
- ((gsi_pending_intr >> chan_hdl) & 1)) {
- /*
- * Check channel state here in case the channel is
- * already started but interrupt is not yet received.
- */
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- curr_state = (val &
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
- }
- if (op == GSI_CH_START) {
- if (curr_state == GSI_CHAN_STATE_STARTED) {
- ctx->state = curr_state;
- return;
- }
- }
- if (op == GSI_CH_STOP) {
- if (curr_state == GSI_CHAN_STATE_STOPPED)
- stop_retry++;
- else if (curr_state == GSI_CHAN_STATE_STOP_IN_PROC)
- stop_in_proc_retry++;
- }
- /* If a STOPPED or STOP_IN_PROC state was just observed, reset
- * the poll counter so the loop keeps running until the dedicated
- * stop / stop-in-proc counters reach their maximums.
- */
- if (stop_retry == 1 || stop_in_proc_retry == 1)
- poll_cnt = 0;
- /* If the stop retry count reached its maximum and the channel
- * is already stopped, clear the pending interrupt.
- */
- if (stop_retry == GSI_STOP_CMD_POLL_CNT) {
- gsi_writel(gsi_pending_intr, gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
- ctx->state = curr_state;
- return;
- }
- /* If the channel is still in STOP_IN_PROC, there is no need
- * to keep waiting any longer.
- */
- if (stop_in_proc_retry == GSI_STOP_IN_PROC_CMD_POLL_CNT) {
- ctx->state = curr_state;
- return;
- }
- GSIDBG("GSI wait on chan_hld=%lu irqtyp=%u state=%u intr=%u\n",
- chan_hdl,
- type,
- ctx->state,
- gsi_pending_intr);
- }
- GSIDBG("invalidating the channel state when timeout happens\n");
- ctx->state = curr_state;
- }
- static void gsi_handle_ch_ctrl(int ee)
- {
- uint32_t ch;
- int i;
- uint32_t val;
- struct gsi_chan_ctx *ctx;
- ch = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
- gsi_writel(ch, gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
- GSIDBG("ch %x\n", ch);
- for (i = 0; i < GSI_STTS_REG_BITS; i++) {
- if ((1 << i) & ch) {
- if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
- GSIERR("invalid channel %d\n", i);
- break;
- }
- ctx = &gsi_ctx->chan[i];
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
- ctx->state = (val &
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
- GSIDBG("ch %u state updated to %u\n", i, ctx->state);
- complete(&ctx->compl);
- gsi_ctx->ch_dbg[i].cmd_completed++;
- }
- }
- }
- static void gsi_handle_ev_ctrl(int ee)
- {
- uint32_t ch;
- int i;
- uint32_t val;
- struct gsi_evt_ctx *ctx;
- ch = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee));
- gsi_writel(ch, gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee));
- GSIDBG("ev %x\n", ch);
- for (i = 0; i < GSI_STTS_REG_BITS; i++) {
- if ((1 << i) & ch) {
- if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
- GSIERR("invalid event %d\n", i);
- break;
- }
- ctx = &gsi_ctx->evtr[i];
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
- ctx->state = (val &
- GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
- GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
- GSIDBG("evt %u state updated to %u\n", i, ctx->state);
- complete(&ctx->compl);
- }
- }
- }
- static void gsi_handle_glob_err(uint32_t err)
- {
- struct gsi_log_err *log;
- struct gsi_chan_ctx *ch;
- struct gsi_evt_ctx *ev;
- struct gsi_chan_err_notify chan_notify;
- struct gsi_evt_err_notify evt_notify;
- struct gsi_per_notify per_notify;
- uint32_t val;
- enum gsi_err_type err_type;
- log = (struct gsi_log_err *)&err;
- GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
- log->virt_idx);
- GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
- log->arg2, log->arg3);
- err_type = log->err_type;
- /*
- * These are errors thrown by hardware. We need
- * BUG_ON() to capture the hardware state right
- * when it is unexpected.
- */
- switch (err_type) {
- case GSI_ERR_TYPE_GLOB:
- per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
- per_notify.user_data = gsi_ctx->per.user_data;
- per_notify.data.err_desc = err & 0xFFFF;
- gsi_ctx->per.notify_cb(&per_notify);
- break;
- case GSI_ERR_TYPE_CHAN:
- if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) {
- GSIERR("Unexpected ch %d\n", log->virt_idx);
- return;
- }
- ch = &gsi_ctx->chan[log->virt_idx];
- chan_notify.chan_user_data = ch->props.chan_user_data;
- chan_notify.err_desc = err & 0xFFFF;
- if (log->code == GSI_INVALID_TRE_ERR) {
- if (log->ee != gsi_ctx->per.ee) {
- GSIERR("unexpected EE in event %d\n", log->ee);
- GSI_ASSERT();
- }
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx,
- gsi_ctx->per.ee));
- ch->state = (val &
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
- GSIDBG("ch %u state updated to %u\n", log->virt_idx,
- ch->state);
- ch->stats.invalid_tre_error++;
- if (ch->state == GSI_CHAN_STATE_ERROR) {
- GSIERR("Unexpected channel state %d\n",
- ch->state);
- GSI_ASSERT();
- }
- chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
- } else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
- if (log->ee != gsi_ctx->per.ee) {
- GSIERR("unexpected EE in event %d\n", log->ee);
- GSI_ASSERT();
- }
- chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
- } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
- if (log->ee != gsi_ctx->per.ee) {
- GSIERR("unexpected EE in event %d\n", log->ee);
- GSI_ASSERT();
- }
- chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
- complete(&ch->compl);
- } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
- chan_notify.evt_id =
- GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
- } else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
- if (log->ee != gsi_ctx->per.ee) {
- GSIERR("unexpected EE in event %d\n", log->ee);
- GSI_ASSERT();
- }
- chan_notify.evt_id =
- GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
- } else if (log->code == GSI_HWO_1_ERR) {
- if (log->ee != gsi_ctx->per.ee) {
- GSIERR("unexpected EE in event %d\n", log->ee);
- GSI_ASSERT();
- }
- chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
- } else {
- GSIERR("unexpected event log code %d\n", log->code);
- GSI_ASSERT();
- }
- ch->props.err_cb(&chan_notify);
- break;
- case GSI_ERR_TYPE_EVT:
- if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) {
- GSIERR("Unexpected ev %d\n", log->virt_idx);
- return;
- }
- ev = &gsi_ctx->evtr[log->virt_idx];
- evt_notify.user_data = ev->props.user_data;
- evt_notify.err_desc = err & 0xFFFF;
- if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
- if (log->ee != gsi_ctx->per.ee) {
- GSIERR("unexpected EE in event %d\n", log->ee);
- GSI_ASSERT();
- }
- evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
- } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
- if (log->ee != gsi_ctx->per.ee) {
- GSIERR("unexpected EE in event %d\n", log->ee);
- GSI_ASSERT();
- }
- evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
- complete(&ev->compl);
- } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
- evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
- } else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
- if (log->ee != gsi_ctx->per.ee) {
- GSIERR("unexpected EE in event %d\n", log->ee);
- GSI_ASSERT();
- }
- evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
- } else {
- GSIERR("unexpected event log code %d\n", log->code);
- GSI_ASSERT();
- }
- ev->props.err_cb(&evt_notify);
- break;
- }
- }
- static void gsi_handle_gp_int1(void)
- {
- complete(&gsi_ctx->gen_ee_cmd_compl);
- }
- static void gsi_handle_glob_ee(int ee)
- {
- uint32_t val;
- uint32_t err;
- struct gsi_per_notify notify;
- uint32_t clr = ~0;
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee));
- notify.user_data = gsi_ctx->per.user_data;
- if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
- err = gsi_readl(gsi_ctx->base +
- GSI_EE_n_ERROR_LOG_OFFS(ee));
- if (gsi_ctx->per.ver >= GSI_VER_1_2)
- gsi_writel(0, gsi_ctx->base +
- GSI_EE_n_ERROR_LOG_OFFS(ee));
- gsi_writel(clr, gsi_ctx->base +
- GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
- gsi_handle_glob_err(err);
- }
- if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK)
- gsi_handle_gp_int1();
- if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
- notify.evt_id = GSI_PER_EVT_GLOB_GP2;
- gsi_ctx->per.notify_cb(¬ify);
- }
- if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) {
- notify.evt_id = GSI_PER_EVT_GLOB_GP3;
- gsi_ctx->per.notify_cb(¬ify);
- }
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee));
- }
- static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
- {
- ctx->wp_local += ctx->elem_sz;
- if (ctx->wp_local == ctx->end)
- ctx->wp_local = ctx->base;
- }
- static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
- {
- ctx->rp_local += ctx->elem_sz;
- if (ctx->rp_local == ctx->end)
- ctx->rp_local = ctx->base;
- }
- uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
- {
- WARN_ON(addr < ctx->base || addr >= ctx->end);
- return (uint32_t)(addr - ctx->base) / ctx->elem_sz;
- }
- static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
- uint64_t addr2)
- {
- uint32_t addr_diff;
- GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
- ctx->base, ctx->end);
- if (addr1 < ctx->base || addr1 >= ctx->end) {
- GSIERR("address = 0x%llx not in range\n", addr1);
- GSI_ASSERT();
- }
- if (addr2 < ctx->base || addr2 >= ctx->end) {
- GSIERR("address = 0x%llx not in range\n", addr2);
- GSI_ASSERT();
- }
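- /*
- * addr2 behind addr1 means the ring wrapped; adding the ring length
- * back in makes the unsigned difference count the full distance
- * around the ring.
- */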
- addr_diff = (uint32_t)(addr2 - addr1);
- if (addr1 < addr2)
- return addr_diff / ctx->elem_sz;
- else
- return (addr_diff + ctx->len) / ctx->elem_sz;
- }
- static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
- struct gsi_chan_xfer_notify *notify, bool callback)
- {
- uint32_t ch_id;
- struct gsi_chan_ctx *ch_ctx;
- uint16_t rp_idx;
- uint64_t rp;
- ch_id = evt->chid;
- if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
- GSIERR("Unexpected ch %d\n", ch_id);
- return;
- }
- ch_ctx = &gsi_ctx->chan[ch_id];
- if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
- return;
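- /*
- * Plain (non-GCI) completions carry the transfer-ring pointer: advance
- * the local RP to it and account for every element completed. GCI
- * completions instead carry a cookie that indexes user_data directly,
- * plus the virtual endpoint id (veid).
- */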
- if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
- rp = evt->xfer_ptr;
- if (ch_ctx->ring.rp_local != rp) {
- ch_ctx->stats.completed +=
- gsi_get_complete_num(&ch_ctx->ring,
- ch_ctx->ring.rp_local, rp);
- ch_ctx->ring.rp_local = rp;
- }
- /* the element at RP is also processed */
- gsi_incr_ring_rp(&ch_ctx->ring);
- ch_ctx->ring.rp = ch_ctx->ring.rp_local;
- rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
- notify->veid = GSI_VEID_DEFAULT;
- } else {
- rp_idx = evt->cookie;
- notify->veid = evt->veid;
- }
- ch_ctx->stats.completed++;
- WARN_ON(!ch_ctx->user_data[rp_idx].valid);
- notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;
- ch_ctx->user_data[rp_idx].valid = false;
- notify->chan_user_data = ch_ctx->props.chan_user_data;
- notify->evt_id = evt->code;
- notify->bytes_xfered = evt->len;
- if (callback) {
- if (atomic_read(&ch_ctx->poll_mode)) {
- GSIERR("Calling client callback in polling mode\n");
- WARN_ON(1);
- }
- ch_ctx->props.xfer_cb(notify);
- }
- }
- static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
- struct gsi_chan_xfer_notify *notify, bool callback)
- {
- struct gsi_xfer_compl_evt *evt;
- evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
- ctx->ring.rp_local - ctx->ring.base);
- gsi_process_chan(evt, notify, callback);
- gsi_incr_ring_rp(&ctx->ring);
- /* recycle this element */
- gsi_incr_ring_wp(&ctx->ring);
- ctx->stats.completed++;
- }
- static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
- {
- uint32_t val;
- ctx->ring.wp = ctx->ring.wp_local;
- val = (ctx->ring.wp_local &
- GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
- GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id,
- gsi_ctx->per.ee));
- }
- static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
- {
- uint32_t val;
- /*
- * Allocate new events for this channel first,
- * before submitting the new TREs.
- * For TO_GSI channels the event ring doorbell is rung as part
- * of interrupt handling.
- */
- if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
- gsi_ring_evt_doorbell(ctx->evtr);
- ctx->ring.wp = ctx->ring.wp_local;
- val = (ctx->ring.wp_local &
- GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
- GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id,
- gsi_ctx->per.ee));
- }
- static void gsi_handle_ieob(int ee)
- {
- uint32_t ch;
- int i;
- uint64_t rp;
- struct gsi_evt_ctx *ctx;
- struct gsi_chan_xfer_notify notify;
- unsigned long flags;
- unsigned long cntr;
- uint32_t msk;
- bool empty;
- ch = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee));
- msk = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
- gsi_writel(ch & msk, gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
- for (i = 0; i < GSI_STTS_REG_BITS; i++) {
- if ((1 << i) & ch & msk) {
- if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
- GSIERR("invalid event %d\n", i);
- break;
- }
- ctx = &gsi_ctx->evtr[i];
- /*
- * Don't handle MSI interrupts, only handle IEOB
- * IRQs
- */
- if (ctx->props.intr == GSI_INTR_MSI)
- continue;
- if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
- GSIERR("Unexpected irq intf %d\n",
- ctx->props.intf);
- GSI_ASSERT();
- }
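- /*
- * Drain the event ring under the ring lock. The HW read pointer is
- * re-read and the loop restarts (check_again) whenever entries were
- * processed, so completions that land while handling are not missed.
- */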
- spin_lock_irqsave(&ctx->ring.slock, flags);
- check_again:
- cntr = 0;
- empty = true;
- rp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
- rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
- ctx->ring.rp = rp;
- while (ctx->ring.rp_local != rp) {
- ++cntr;
- if (ctx->props.exclusive &&
- atomic_read(&ctx->chan->poll_mode)) {
- cntr = 0;
- break;
- }
- gsi_process_evt_re(ctx, ¬ify, true);
- empty = false;
- }
- if (!empty)
- gsi_ring_evt_doorbell(ctx);
- if (cntr != 0)
- goto check_again;
- spin_unlock_irqrestore(&ctx->ring.slock, flags);
- }
- }
- }
- static void gsi_handle_inter_ee_ch_ctrl(int ee)
- {
- uint32_t ch;
- int i;
- ch = gsi_readl(gsi_ctx->base +
- GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee));
- gsi_writel(ch, gsi_ctx->base +
- GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
- for (i = 0; i < GSI_STTS_REG_BITS; i++) {
- if ((1 << i) & ch) {
- /* not currently expected */
- GSIERR("ch %u was inter-EE changed\n", i);
- }
- }
- }
- static void gsi_handle_inter_ee_ev_ctrl(int ee)
- {
- uint32_t ch;
- int i;
- ch = gsi_readl(gsi_ctx->base +
- GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee));
- gsi_writel(ch, gsi_ctx->base +
- GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee));
- for (i = 0; i < GSI_STTS_REG_BITS; i++) {
- if ((1 << i) & ch) {
- /* not currently expected */
- GSIERR("evt %u was inter-EE changed\n", i);
- }
- }
- }
- static void gsi_handle_general(int ee)
- {
- uint32_t val;
- struct gsi_per_notify notify;
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee));
- notify.user_data = gsi_ctx->per.user_data;
- if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK)
- notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;
- if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK)
- notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;
- if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK)
- notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;
- if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK)
- notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;
- if (gsi_ctx->per.notify_cb)
- gsi_ctx->per.notify_cb(¬ify);
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee));
- }
- #define GSI_ISR_MAX_ITER 50
- static void gsi_handle_irq(void)
- {
- uint32_t type;
- int ee = gsi_ctx->per.ee;
- unsigned long cnt = 0;
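- /*
- * Dispatch every pending interrupt class until the TYPE_IRQ register
- * reads zero; assert if the HW keeps reporting interrupts beyond
- * GSI_ISR_MAX_ITER iterations.
- */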
- while (1) {
- type = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee));
- if (!type)
- break;
- GSIDBG_LOW("type 0x%x\n", type);
- if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
- gsi_handle_ch_ctrl(ee);
- if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK)
- gsi_handle_ev_ctrl(ee);
- if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK)
- gsi_handle_glob_ee(ee);
- if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
- gsi_handle_ieob(ee);
- if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK)
- gsi_handle_inter_ee_ch_ctrl(ee);
- if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK)
- gsi_handle_inter_ee_ev_ctrl(ee);
- if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
- gsi_handle_general(ee);
- if (++cnt > GSI_ISR_MAX_ITER) {
- /*
- * Max number of spurious interrupts from hardware.
- * Unexpected hardware state.
- */
- GSIERR("Too many spurious interrupt from GSI HW\n");
- GSI_ASSERT();
- }
- }
- }
- static irqreturn_t gsi_isr(int irq, void *ctxt)
- {
- if (gsi_ctx->per.req_clk_cb) {
- bool granted = false;
- gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
- if (granted) {
- gsi_handle_irq();
- gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
- }
- } else if (!gsi_ctx->per.clk_status_cb()) {
- /* we only want to capture the gsi isr storm here */
- if (atomic_read(&gsi_ctx->num_unclock_irq) ==
- GSI_IRQ_STORM_THR)
- gsi_ctx->per.enable_clk_bug_on();
- atomic_inc(&gsi_ctx->num_unclock_irq);
- return IRQ_HANDLED;
- } else {
- atomic_set(&gsi_ctx->num_unclock_irq, 0);
- gsi_handle_irq();
- }
- return IRQ_HANDLED;
- }
- static uint32_t gsi_get_max_channels(enum gsi_ver ver)
- {
- uint32_t reg = 0;
- switch (ver) {
- case GSI_VER_ERR:
- case GSI_VER_MAX:
- GSIERR("GSI version is not supported %d\n", ver);
- WARN_ON(1);
- break;
- case GSI_VER_1_0:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
- reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
- GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
- break;
- case GSI_VER_1_2:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
- reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
- GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
- break;
- case GSI_VER_1_3:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
- GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
- break;
- case GSI_VER_2_0:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
- GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
- break;
- case GSI_VER_2_2:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
- GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
- break;
- case GSI_VER_2_5:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
- GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
- break;
- case GSI_VER_2_7:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
- GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
- break;
- case GSI_VER_2_9:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
- GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
- break;
- }
- GSIDBG("max channels %d\n", reg);
- return reg;
- }
- static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
- {
- uint32_t reg = 0;
- switch (ver) {
- case GSI_VER_ERR:
- case GSI_VER_MAX:
- GSIERR("GSI version is not supported %d\n", ver);
- WARN_ON(1);
- break;
- case GSI_VER_1_0:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
- reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
- GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
- break;
- case GSI_VER_1_2:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
- reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
- GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
- break;
- case GSI_VER_1_3:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
- GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
- break;
- case GSI_VER_2_0:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
- GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
- break;
- case GSI_VER_2_2:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
- GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
- break;
- case GSI_VER_2_5:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
- GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
- break;
- case GSI_VER_2_7:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
- GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
- break;
- case GSI_VER_2_9:
- reg = gsi_readl(gsi_ctx->base +
- GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- reg = (reg &
- GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
- GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
- break;
- }
- GSIDBG("max event rings %d\n", reg);
- return reg;
- }
- int gsi_complete_clk_grant(unsigned long dev_hdl)
- {
- unsigned long flags;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!gsi_ctx->per_registered) {
- GSIERR("no client registered\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (dev_hdl != (uintptr_t)gsi_ctx) {
- GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
- gsi_ctx);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- spin_lock_irqsave(&gsi_ctx->slock, flags);
- gsi_handle_irq();
- gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
- spin_unlock_irqrestore(&gsi_ctx->slock, flags);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_complete_clk_grant);
- int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size)
- {
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- gsi_ctx->base = devm_ioremap_nocache(
- gsi_ctx->dev, gsi_base_addr, gsi_size);
- if (!gsi_ctx->base) {
- GSIERR("failed to map access to GSI HW\n");
- return -GSI_STATUS_RES_ALLOC_FAILURE;
- }
- GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%x)\n",
- &gsi_base_addr,
- gsi_ctx->base,
- gsi_size);
- return 0;
- }
- EXPORT_SYMBOL(gsi_map_base);
- int gsi_unmap_base(void)
- {
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!gsi_ctx->base) {
- GSIERR("access to GSI HW has not been mapped\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
- gsi_ctx->base = NULL;
- return 0;
- }
- EXPORT_SYMBOL(gsi_unmap_base);
- int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
- {
- int res;
- uint32_t val;
- int needed_reg_ver;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!props || !dev_hdl) {
- GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
- GSIERR("bad params gsi_ver=%d\n", props->ver);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (!props->notify_cb) {
- GSIERR("notify callback must be provided\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->req_clk_cb && !props->rel_clk_cb) {
- GSIERR("rel callback must be provided\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_ctx->per_registered) {
- GSIERR("per already registered\n");
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- switch (props->ver) {
- case GSI_VER_1_0:
- case GSI_VER_1_2:
- case GSI_VER_1_3:
- case GSI_VER_2_0:
- case GSI_VER_2_2:
- needed_reg_ver = GSI_REGISTER_VER_1;
- break;
- case GSI_VER_2_5:
- case GSI_VER_2_7:
- case GSI_VER_2_9:
- needed_reg_ver = GSI_REGISTER_VER_2;
- break;
- case GSI_VER_ERR:
- case GSI_VER_MAX:
- default:
- GSIERR("GSI version is not supported %d\n", props->ver);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (needed_reg_ver != GSI_REGISTER_VER_CURRENT) {
- GSIERR("Invalid register version. current=%d, needed=%d\n",
- GSI_REGISTER_VER_CURRENT, needed_reg_ver);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- GSIDBG("gsi ver %d register ver %d needed register ver %d\n",
- props->ver, GSI_REGISTER_VER_CURRENT, needed_reg_ver);
- spin_lock_init(&gsi_ctx->slock);
- if (props->intr == GSI_INTR_IRQ) {
- if (!props->irq) {
- GSIERR("bad irq specified %u\n", props->irq);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- /*
- * On a real UE, there are two separate interrupt
- * vectors that get directed toward the GSI/IPA
- * drivers. They are handled by gsi_isr() and
- * (ipa_isr() or ipa3_isr()) respectively. In the
- * emulation environment, this is not the case;
- * instead, interrupt vectors are routed to the
- * emulation hardware's interrupt controller, which,
- * in turn, forwards a single interrupt to the GSI/IPA
- * driver. When the new interrupt vector is received,
- * the driver needs to probe the interrupt
- * controller's registers to see if one, the other, or
- * both interrupts have occurred. Given the above, we
- * now need to handle both situations, namely: the
- * emulator's and the real UE's.
- */
- if (running_emulation) {
- /*
- * New scheme involving the emulator's
- * interrupt controller.
- */
- res = devm_request_threaded_irq(
- gsi_ctx->dev,
- props->irq,
- /* top half handler to follow */
- emulator_hard_irq_isr,
- /* threaded bottom half handler to follow */
- emulator_soft_irq_isr,
- IRQF_SHARED,
- "emulator_intcntrlr",
- gsi_ctx);
- } else {
- /*
- * Traditional scheme used on the real UE.
- */
- res = devm_request_irq(gsi_ctx->dev, props->irq,
- gsi_isr,
- props->req_clk_cb ? IRQF_TRIGGER_RISING :
- IRQF_TRIGGER_HIGH,
- "gsi",
- gsi_ctx);
- }
- if (res) {
- GSIERR(
- "failed to register isr for %u\n",
- props->irq);
- return -GSI_STATUS_ERROR;
- }
- GSIDBG(
- "succeeded to register isr for %u\n",
- props->irq);
- res = enable_irq_wake(props->irq);
- if (res)
- GSIERR("failed to enable wake irq %u\n", props->irq);
- else
- GSIERR("GSI irq is wake enabled %u\n", props->irq);
- } else {
- GSIERR("do not support interrupt type %u\n", props->intr);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- /*
- * If base not previously mapped via gsi_map_base(), map it
- * now...
- */
- if (!gsi_ctx->base) {
- res = gsi_map_base(props->phys_addr, props->size);
- if (res)
- return res;
- }
- if (running_emulation) {
- GSIDBG("GSI SW ver register value 0x%x\n",
- gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_SW_VERSION_OFFS(0)));
- gsi_ctx->intcntrlr_mem_size =
- props->emulator_intcntrlr_size;
- gsi_ctx->intcntrlr_base =
- devm_ioremap_nocache(
- gsi_ctx->dev,
- props->emulator_intcntrlr_addr,
- props->emulator_intcntrlr_size);
- if (!gsi_ctx->intcntrlr_base) {
- GSIERR(
- "failed to remap emulator's interrupt controller HW\n");
- gsi_unmap_base();
- devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
- return -GSI_STATUS_RES_ALLOC_FAILURE;
- }
- GSIDBG(
- "Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
- &(props->emulator_intcntrlr_addr),
- gsi_ctx->intcntrlr_base,
- props->emulator_intcntrlr_size);
- gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
- gsi_ctx->intcntrlr_client_isr =
- props->emulator_intcntrlr_client_isr;
- }
- gsi_ctx->per = *props;
- gsi_ctx->per_registered = true;
- mutex_init(&gsi_ctx->mlock);
- atomic_set(&gsi_ctx->num_chan, 0);
- atomic_set(&gsi_ctx->num_evt_ring, 0);
- gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
- if (gsi_ctx->max_ch == 0) {
- gsi_unmap_base();
- if (running_emulation)
- devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
- gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
- devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
- GSIERR("failed to get max channels\n");
- return -GSI_STATUS_ERROR;
- }
- gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
- if (gsi_ctx->max_ev == 0) {
- gsi_unmap_base();
- if (running_emulation)
- devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
- gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
- devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
- GSIERR("failed to get max event rings\n");
- return -GSI_STATUS_ERROR;
- }
- if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
- GSIERR("max event rings are beyond absolute maximum\n");
- return -GSI_STATUS_ERROR;
- }
- if (props->mhi_er_id_limits_valid &&
- props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
- gsi_unmap_base();
- if (running_emulation)
- devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
- gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
- devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
- GSIERR("MHI event ring start id %u is beyond max %u\n",
- props->mhi_er_id_limits[0], gsi_ctx->max_ev);
- return -GSI_STATUS_ERROR;
- }
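- /* mark event rings beyond max_ev as unavailable in the bitmap */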
- gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
- /* exclude reserved mhi events */
- if (props->mhi_er_id_limits_valid)
- gsi_ctx->evt_bmap |=
- ((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^
- ((1 << (props->mhi_er_id_limits[0])) - 1);
- /*
- * Enable all interrupts except GSI_BREAK_POINT.
- * Inter-EE commands / interrupts are not supported.
- */
- __gsi_config_type_irq(props->ee, ~0, ~0);
- __gsi_config_ch_irq(props->ee, ~0, ~0);
- __gsi_config_evt_irq(props->ee, ~0, ~0);
- __gsi_config_ieob_irq(props->ee, ~0, ~0);
- __gsi_config_glob_irq(props->ee, ~0, ~0);
- __gsi_config_gen_irq(props->ee, ~0,
- ~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK);
- gsi_writel(props->intr, gsi_ctx->base +
- GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
- /* set GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB/MSB to 0 */
- if ((gsi_ctx->per.ver >= GSI_VER_2_0) &&
- (props->intr != GSI_INTR_MSI)) {
- gsi_writel(0, gsi_ctx->base +
- GSI_EE_n_CNTXT_MSI_BASE_LSB(gsi_ctx->per.ee));
- gsi_writel(0, gsi_ctx->base +
- GSI_EE_n_CNTXT_MSI_BASE_MSB(gsi_ctx->per.ee));
- }
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
- if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK)
- gsi_ctx->enabled = true;
- else
- GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");
- if (gsi_ctx->per.ver >= GSI_VER_1_2)
- gsi_writel(0, gsi_ctx->base +
- GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
- if (running_emulation) {
- /*
- * Set up the emulator's interrupt controller...
- */
- res = setup_emulator_cntrlr(
- gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
- if (res != 0) {
- gsi_unmap_base();
- devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
- gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
- devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
- GSIERR("setup_emulator_cntrlr() failed\n");
- return res;
- }
- }
- *dev_hdl = (uintptr_t)gsi_ctx;
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_register_device);
- int gsi_write_device_scratch(unsigned long dev_hdl,
- struct gsi_device_scratch *val)
- {
- unsigned int max_usb_pkt_size = 0;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!gsi_ctx->per_registered) {
- GSIERR("no client registered\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (dev_hdl != (uintptr_t)gsi_ctx) {
- GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
- gsi_ctx);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (val->max_usb_pkt_size_valid &&
- val->max_usb_pkt_size != 1024 &&
- val->max_usb_pkt_size != 512 &&
- val->max_usb_pkt_size != 64) {
- GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
- val->max_usb_pkt_size);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- mutex_lock(&gsi_ctx->mlock);
- if (val->mhi_base_chan_idx_valid)
- gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
- val->mhi_base_chan_idx;
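- /* scratch encoding of the USB max packet size: 512 -> 0, 1024 -> 1, 64 -> 2 */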
- if (val->max_usb_pkt_size_valid) {
- max_usb_pkt_size = 2;
- if (val->max_usb_pkt_size > 64)
- max_usb_pkt_size =
- (val->max_usb_pkt_size == 1024) ? 1 : 0;
- gsi_ctx->scratch.word0.s.max_usb_pkt_size = max_usb_pkt_size;
- }
- gsi_writel(gsi_ctx->scratch.word0.val,
- gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
- mutex_unlock(&gsi_ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_write_device_scratch);
- int gsi_deregister_device(unsigned long dev_hdl, bool force)
- {
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!gsi_ctx->per_registered) {
- GSIERR("no client registered\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (dev_hdl != (uintptr_t)gsi_ctx) {
- GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
- gsi_ctx);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (!force && atomic_read(&gsi_ctx->num_chan)) {
- GSIERR("cannot deregister %u channels are still connected\n",
- atomic_read(&gsi_ctx->num_chan));
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
- GSIERR("cannot deregister %u events are still connected\n",
- atomic_read(&gsi_ctx->num_evt_ring));
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- /* disable all interrupts */
- __gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
- __gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
- __gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
- __gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
- __gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
- __gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);
- devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
- gsi_unmap_base();
- memset(gsi_ctx, 0, sizeof(*gsi_ctx));
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_deregister_device);
- static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
- uint8_t evt_id, unsigned int ee)
- {
- uint32_t val;
- GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
- props->re_size);
- val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) &
- GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) |
- ((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) &
- GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) |
- ((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
- & GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee));
- if (gsi_ctx->per.ver >= GSI_VER_2_9) {
- val = (props->ring_len &
- GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK)
- << GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
- } else {
- val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK)
- << GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
- }
- val = (props->ring_base_addr &
- GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
- GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee));
- val = ((props->ring_base_addr >> 32) &
- GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
- GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee));
- val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) &
- GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) |
- ((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) &
- GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee));
- val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) <<
- GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee));
- val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) <<
- GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee));
- val = ((props->msi_addr >> 32) &
- GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) <<
- GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee));
- val = (props->rp_update_addr &
- GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) <<
- GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee));
- val = ((props->rp_update_addr >> 32) &
- GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) <<
- GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee));
- }
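- /*
- * gsi_init_evt_ring() - initialize the software view of an event ring:
- * cache the virtual and physical base, reset the local and cached
- * hardware read/write pointers to the base, and derive the element
- * count and ring end from the ring length and element size.
- */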
- static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
- struct gsi_ring_ctx *ctx)
- {
- ctx->base_va = (uintptr_t)props->ring_base_vaddr;
- ctx->base = props->ring_base_addr;
- ctx->wp = ctx->base;
- ctx->rp = ctx->base;
- ctx->wp_local = ctx->base;
- ctx->rp_local = ctx->base;
- ctx->len = props->ring_len;
- ctx->elem_sz = props->re_size;
- ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
- ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
- }
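- /*
- * gsi_prime_evt_ring() - prepare a GPI event ring for use: zero the
- * ring memory, move the local write pointer to the last element, write
- * the doorbell MSB register and then ring the event doorbell (which
- * writes the LSB).
- */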
- static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
- {
- unsigned long flags;
- uint32_t val;
- spin_lock_irqsave(&ctx->ring.slock, flags);
- memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
- ctx->ring.wp_local = ctx->ring.base +
- ctx->ring.max_num_elem * ctx->ring.elem_sz;
- /* write order MUST be MSB followed by LSB */
- val = ((ctx->ring.wp_local >> 32) &
- GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
- GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id,
- gsi_ctx->per.ee));
- gsi_ring_evt_doorbell(ctx);
- spin_unlock_irqrestore(&ctx->ring.slock, flags);
- }
- static void gsi_prime_evt_ring_wdi(struct gsi_evt_ctx *ctx)
- {
- unsigned long flags;
- spin_lock_irqsave(&ctx->ring.slock, flags);
- if (ctx->ring.base_va)
- memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
- ctx->ring.wp_local = ctx->ring.base +
- ((ctx->ring.max_num_elem + 2) * ctx->ring.elem_sz);
- gsi_ring_evt_doorbell(ctx);
- spin_unlock_irqrestore(&ctx->ring.slock, flags);
- }
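- /*
- * gsi_validate_evt_ring_props() - sanity-check event ring properties:
- * the ring length must be a multiple of the element size, the ring
- * base must be aligned to the ring length rounded up to a power of
- * two, GPI rings need a virtual base address, only MHI rings may (and
- * must) carry a valid event channel ID within the MHI limits, and an
- * error callback is mandatory.
- */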
- static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
- {
- uint64_t ra;
- if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
- props->ring_len % 4) ||
- (props->re_size == GSI_EVT_RING_RE_SIZE_8B &&
- props->ring_len % 8) ||
- (props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
- props->ring_len % 16)) {
- GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
- props->ring_len, props->re_size);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ra = props->ring_base_addr;
- do_div(ra, roundup_pow_of_two(props->ring_len));
- if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
- GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
- props->ring_base_addr,
- roundup_pow_of_two(props->ring_len));
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
- !props->ring_base_vaddr) {
- GSIERR("protocol %u requires ring base VA\n", props->intf);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
- (!props->evchid_valid ||
- props->evchid > gsi_ctx->per.mhi_er_id_limits[1] ||
- props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) {
- GSIERR("MHI requires evchid valid=%d val=%u\n",
- props->evchid_valid, props->evchid);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
- props->evchid_valid) {
- GSIERR("protocol %u cannot specify evchid\n", props->intf);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (!props->err_cb) {
- GSIERR("err callback must be provided\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- return GSI_STATUS_SUCCESS;
- }
- /**
- * gsi_cleanup_xfer_user_data: clean up the user data array using a callback
- * passed in by the IPA driver. This must be done in GSI since only GSI knows
- * which TREs are in use. IPA, however, is the one that does the cleaning,
- * so the callback provided by IPA is invoked here with parameters from GSI.
- *
- * @chan_hdl: handle of the GSI channel whose user data array is to be cleaned
- * @cleanup_cb: callback used to clean the user data array; takes 2 inputs
- * @chan_user_data: ipa_sys_context of the gsi_channel
- * @xfer_user_data: user data array element (rx_pkt wrapper)
- *
- * Returns: 0 on success, negative on failure
- */
- static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
- void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
- {
- struct gsi_chan_ctx *ctx;
- uint64_t i;
- uint16_t rp_idx;
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- /* for coalescing, traverse the whole array */
- if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
- size_t user_data_size =
- ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
- for (i = 0; i < user_data_size; i++) {
- if (ctx->user_data[i].valid)
- cleanup_cb(ctx->props.chan_user_data,
- ctx->user_data[i].p);
- }
- } else {
- /* for non-coalescing, clean between RP and WP */
- while (ctx->ring.rp_local != ctx->ring.wp_local) {
- rp_idx = gsi_find_idx_from_addr(&ctx->ring,
- ctx->ring.rp_local);
- WARN_ON(!ctx->user_data[rp_idx].valid);
- cleanup_cb(ctx->props.chan_user_data,
- ctx->user_data[rp_idx].p);
- gsi_incr_ring_rp(&ctx->ring);
- }
- }
- return 0;
- }
- int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
- unsigned long *evt_ring_hdl)
- {
- unsigned long evt_id;
- enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
- uint32_t val;
- struct gsi_evt_ctx *ctx;
- int res;
- int ee;
- unsigned long flags;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
- GSIERR("bad params props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n",
- props, dev_hdl, evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_validate_evt_ring_props(props)) {
- GSIERR("invalid params\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (!props->evchid_valid) {
- mutex_lock(&gsi_ctx->mlock);
- evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
- sizeof(unsigned long) * BITS_PER_BYTE);
- if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
- GSIERR("failed to alloc event ID\n");
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_RES_ALLOC_FAILURE;
- }
- set_bit(evt_id, &gsi_ctx->evt_bmap);
- mutex_unlock(&gsi_ctx->mlock);
- } else {
- evt_id = props->evchid;
- }
- GSIDBG("Using %lu as virt evt id\n", evt_id);
- ctx = &gsi_ctx->evtr[evt_id];
- memset(ctx, 0, sizeof(*ctx));
- mutex_init(&ctx->mlock);
- init_completion(&ctx->compl);
- atomic_set(&ctx->chan_ref_cnt, 0);
- ctx->props = *props;
- mutex_lock(&gsi_ctx->mlock);
- val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
- GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
- ee = gsi_ctx->per.ee;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_CMD_OFFS(ee));
- res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
- if (res == 0) {
- GSIERR("evt_id=%lu timed out\n", evt_id);
- if (!props->evchid_valid)
- clear_bit(evt_id, &gsi_ctx->evt_bmap);
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_TIMED_OUT;
- }
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
- GSIERR("evt_id=%lu allocation failed state=%u\n",
- evt_id, ctx->state);
- if (!props->evchid_valid)
- clear_bit(evt_id, &gsi_ctx->evt_bmap);
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_RES_ALLOC_FAILURE;
- }
- gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);
- spin_lock_init(&ctx->ring.slock);
- gsi_init_evt_ring(props, &ctx->ring);
- ctx->id = evt_id;
- *evt_ring_hdl = evt_id;
- atomic_inc(&gsi_ctx->num_evt_ring);
- if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
- gsi_prime_evt_ring(ctx);
- else if (props->intf == GSI_EVT_CHTYPE_WDI2_EV)
- gsi_prime_evt_ring_wdi(ctx);
- mutex_unlock(&gsi_ctx->mlock);
- spin_lock_irqsave(&gsi_ctx->slock, flags);
- gsi_writel(1 << evt_id, gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
- /* enable IEOB interrupts only for GPI or MSI event rings; mask them otherwise */
- if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
- (props->intr != GSI_INTR_MSI))
- __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
- else
- __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
- spin_unlock_irqrestore(&gsi_ctx->slock, flags);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_alloc_evt_ring);
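- /*
- * The event ring scratch area is two 32-bit registers (SCRATCH_0/1);
- * the helper below writes both words from the packed scratch union.
- */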
- static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
- union __packed gsi_evt_scratch val)
- {
- gsi_writel(val.data.word1, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl,
- gsi_ctx->per.ee));
- gsi_writel(val.data.word2, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl,
- gsi_ctx->per.ee));
- }
- int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
- union __packed gsi_evt_scratch val)
- {
- struct gsi_evt_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (evt_ring_hdl >= gsi_ctx->max_ev) {
- GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->evtr[evt_ring_hdl];
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
- GSIERR("bad state %d\n",
- gsi_ctx->evtr[evt_ring_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&ctx->mlock);
- ctx->scratch = val;
- __gsi_write_evt_ring_scratch(evt_ring_hdl, val);
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_write_evt_ring_scratch);
- int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
- {
- uint32_t val;
- enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
- struct gsi_evt_ctx *ctx;
- int res;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (evt_ring_hdl >= gsi_ctx->max_ev ||
- evt_ring_hdl >= GSI_EVT_RING_MAX) {
- GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->evtr[evt_ring_hdl];
- if (atomic_read(&ctx->chan_ref_cnt)) {
- GSIERR("%d channels still using this event ring\n",
- atomic_read(&ctx->chan_ref_cnt));
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&gsi_ctx->mlock);
- reinit_completion(&ctx->compl);
- val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
- GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
- res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
- if (res == 0) {
- GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_TIMED_OUT;
- }
- if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
- GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
- ctx->state);
- /*
- * Hardware did not move the ring to NOT_ALLOCATED after the
- * de-alloc command, which is an unexpected hardware state.
- */
- GSI_ASSERT();
- }
- mutex_unlock(&gsi_ctx->mlock);
- if (!ctx->props.evchid_valid) {
- mutex_lock(&gsi_ctx->mlock);
- clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
- mutex_unlock(&gsi_ctx->mlock);
- }
- atomic_dec(&gsi_ctx->num_evt_ring);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_dealloc_evt_ring);
- int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
- uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
- {
- struct gsi_evt_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!db_addr_wp_msb || !db_addr_wp_lsb) {
- GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
- db_addr_wp_lsb);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (evt_ring_hdl >= gsi_ctx->max_ev) {
- GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->evtr[evt_ring_hdl];
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
- GSIERR("bad state %d\n",
- gsi_ctx->evtr[evt_ring_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- *db_addr_wp_lsb = gsi_ctx->per.phys_addr +
- GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
- *db_addr_wp_msb = gsi_ctx->per.phys_addr +
- GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);
- int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
- {
- struct gsi_evt_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (evt_ring_hdl >= gsi_ctx->max_ev) {
- GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->evtr[evt_ring_hdl];
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
- GSIERR("bad state %d\n",
- gsi_ctx->evtr[evt_ring_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- ctx->ring.wp_local = value;
- gsi_ring_evt_doorbell(ctx);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_ring_evt_ring_db);
- int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
- {
- struct gsi_chan_ctx *ctx;
- uint32_t val;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state != GSI_CHAN_STATE_STARTED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- ctx->ring.wp_local = value;
- /* write MSB first */
- val = ((ctx->ring.wp_local >> 32) &
- GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
- GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
- gsi_ctx->per.ee));
- gsi_ring_chan_doorbell(ctx);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_ring_ch_ring_db);
- int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
- {
- uint32_t val;
- enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
- struct gsi_evt_ctx *ctx;
- int res;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (evt_ring_hdl >= gsi_ctx->max_ev) {
- GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->evtr[evt_ring_hdl];
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&gsi_ctx->mlock);
- reinit_completion(&ctx->compl);
- val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
- GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
- res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
- if (res == 0) {
- GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_TIMED_OUT;
- }
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
- GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
- ctx->state);
- /*
- * Hardware left the ring in a state other than ALLOCATED after
- * the reset command, which is unexpected and indicates hardware
- * instability.
- */
- GSI_ASSERT();
- }
- gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
- gsi_init_evt_ring(&ctx->props, &ctx->ring);
- /* restore scratch */
- __gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);
- if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
- gsi_prime_evt_ring(ctx);
- if (ctx->props.intf == GSI_EVT_CHTYPE_WDI2_EV)
- gsi_prime_evt_ring_wdi(ctx);
- mutex_unlock(&gsi_ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_reset_evt_ring);
- int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
- struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
- {
- struct gsi_evt_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!props || !scr) {
- GSIERR("bad params props=%pK scr=%pK\n", props, scr);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (evt_ring_hdl >= gsi_ctx->max_ev) {
- GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->evtr[evt_ring_hdl];
- if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&ctx->mlock);
- *props = ctx->props;
- *scr = ctx->scratch;
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_get_evt_ring_cfg);
- int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
- struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
- {
- struct gsi_evt_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!props || gsi_validate_evt_ring_props(props)) {
- GSIERR("bad params props=%pK\n", props);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (evt_ring_hdl >= gsi_ctx->max_ev) {
- GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->evtr[evt_ring_hdl];
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (ctx->props.exclusive != props->exclusive) {
- GSIERR("changing immutable fields not supported\n");
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&ctx->mlock);
- ctx->props = *props;
- if (scr)
- ctx->scratch = *scr;
- mutex_unlock(&ctx->mlock);
- return gsi_reset_evt_ring(evt_ring_hdl);
- }
- EXPORT_SYMBOL(gsi_set_evt_ring_cfg);
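- /*
- * The three helpers below program the per-channel QOS register. The
- * register layout differs between GSI versions: the base layout
- * carries the WRR weight, max prefetch and doorbell-engine use (plus
- * the escape-buffer-only bit from GSI 2.0); v2.5 adds the prefetch
- * mode and empty-level threshold fields; v2.9 additionally adds the
- * doorbell-in-bytes mode.
- */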
- static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props,
- unsigned int ee)
- {
- uint32_t val;
- val =
- (((props->low_weight <<
- GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
- GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
- ((props->max_prefetch <<
- GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
- GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
- ((props->use_db_eng <<
- GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
- GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK));
- if (gsi_ctx->per.ver >= GSI_VER_2_0)
- val |= ((props->prefetch_mode <<
- GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT)
- & GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK);
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
- }
- static void gsi_program_chan_ctx_qos_v2_5(struct gsi_chan_props *props,
- unsigned int ee)
- {
- uint32_t val;
- val =
- (((props->low_weight <<
- GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
- GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
- ((props->max_prefetch <<
- GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
- GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
- ((props->use_db_eng <<
- GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
- GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) |
- ((props->prefetch_mode <<
- GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) &
- GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) |
- ((props->empty_lvl_threshold <<
- GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) &
- GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
- }
- static void gsi_program_chan_ctx_qos_v2_9(struct gsi_chan_props *props,
- unsigned int ee)
- {
- uint32_t val;
- val =
- (((props->low_weight <<
- GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
- GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
- ((props->max_prefetch <<
- GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
- GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
- ((props->use_db_eng <<
- GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
- GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) |
- ((props->prefetch_mode <<
- GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) &
- GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) |
- ((props->empty_lvl_threshold <<
- GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) &
- GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK) |
- ((props->db_in_bytes <<
- GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_SHFT) &
- GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_V2_9_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
- }
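- /*
- * gsi_program_chan_ctx() - program the channel context registers:
- * protocol (with the MSB encoded separately from GSI 2.5), direction,
- * event ring index and element size into CNTXT_0, the ring length
- * into CNTXT_1, the 64-bit ring base address into CNTXT_2/3, and then
- * the version-specific QOS register.
- */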
- static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
- uint8_t erindex)
- {
- uint32_t val;
- uint32_t prot;
- uint32_t prot_msb;
- switch (props->prot) {
- case GSI_CHAN_PROT_MHI:
- case GSI_CHAN_PROT_XHCI:
- case GSI_CHAN_PROT_GPI:
- case GSI_CHAN_PROT_XDCI:
- case GSI_CHAN_PROT_WDI2:
- case GSI_CHAN_PROT_WDI3:
- case GSI_CHAN_PROT_GCI:
- case GSI_CHAN_PROT_MHIP:
- prot_msb = 0;
- break;
- case GSI_CHAN_PROT_AQC:
- case GSI_CHAN_PROT_11AD:
- prot_msb = 1;
- break;
- default:
- GSIERR("Unsupported protocol %d\n", props->prot);
- WARN_ON(1);
- return;
- }
- prot = props->prot;
- val = ((prot <<
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT) &
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK);
- if (gsi_ctx->per.ver >= GSI_VER_2_5) {
- val |= ((prot_msb <<
- GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT) &
- GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK);
- }
- val |= (((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) &
- GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) |
- ((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) &
- GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) |
- ((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
- & GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee));
- if (gsi_ctx->per.ver >= GSI_VER_2_9) {
- val = (props->ring_len &
- GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK)
- << GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_OFFS(
- props->ch_id, ee));
- } else {
- val = (props->ring_len &
- GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK)
- << GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id,
- ee));
- }
- val = (props->ring_base_addr &
- GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
- GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee));
- val = ((props->ring_base_addr >> 32) &
- GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
- GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee));
- if (gsi_ctx->per.ver >= GSI_VER_2_9)
- gsi_program_chan_ctx_qos_v2_9(props, ee);
- else if (gsi_ctx->per.ver >= GSI_VER_2_5)
- gsi_program_chan_ctx_qos_v2_5(props, ee);
- else
- gsi_program_chan_ctx_qos(props, ee);
- }
- static void gsi_init_chan_ring(struct gsi_chan_props *props,
- struct gsi_ring_ctx *ctx)
- {
- ctx->base_va = (uintptr_t)props->ring_base_vaddr;
- ctx->base = props->ring_base_addr;
- ctx->wp = ctx->base;
- ctx->rp = ctx->base;
- ctx->wp_local = ctx->base;
- ctx->rp_local = ctx->base;
- ctx->len = props->ring_len;
- ctx->elem_sz = props->re_size;
- ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
- ctx->end = ctx->base + (ctx->max_num_elem + 1) *
- ctx->elem_sz;
- }
- static int gsi_validate_channel_props(struct gsi_chan_props *props)
- {
- uint64_t ra;
- uint64_t last;
- if (props->ch_id >= gsi_ctx->max_ch) {
- GSIERR("ch_id %u invalid\n", props->ch_id);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
- props->ring_len % 4) ||
- (props->re_size == GSI_CHAN_RE_SIZE_8B &&
- props->ring_len % 8) ||
- (props->re_size == GSI_CHAN_RE_SIZE_16B &&
- props->ring_len % 16) ||
- (props->re_size == GSI_CHAN_RE_SIZE_32B &&
- props->ring_len % 32)) {
- GSIERR("bad params ring_len %u not a multiple of re size %u\n",
- props->ring_len, props->re_size);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ra = props->ring_base_addr;
- do_div(ra, roundup_pow_of_two(props->ring_len));
- if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
- GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
- props->ring_base_addr,
- roundup_pow_of_two(props->ring_len));
- return -GSI_STATUS_INVALID_PARAMS;
- }
- last = props->ring_base_addr + props->ring_len - props->re_size;
- /* the upper 32 bits of the ring addresses must stay the same across the whole ring */
- if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
- (last & 0xFFFFFFFF00000000ULL)) {
- GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
- props->ring_base_addr,
- props->ring_len);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->prot == GSI_CHAN_PROT_GPI &&
- !props->ring_base_vaddr) {
- GSIERR("protocol %u requires ring base VA\n", props->prot);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
- GSIERR("invalid channel low weight %u\n", props->low_weight);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
- GSIERR("xfer callback must be provided\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (!props->err_cb) {
- GSIERR("err callback must be provided\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- return GSI_STATUS_SUCCESS;
- }
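- /*
- * gsi_alloc_channel() - allocate a GSI channel: validate the
- * properties, allocate the user_data bookkeeping array, issue the
- * CH_ALLOCATE command (except on GSI 2.2, where the state is set in
- * software), bind the channel to its event ring and program the
- * channel context and software ring state.
- */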
- int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
- unsigned long *chan_hdl)
- {
- struct gsi_chan_ctx *ctx;
- uint32_t val;
- int res;
- int ee;
- enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
- uint8_t erindex;
- struct gsi_user_data *user_data;
- size_t user_data_size;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
- GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n",
- props, dev_hdl, chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_validate_channel_props(props)) {
- GSIERR("bad params\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (props->evt_ring_hdl != ~0) {
- if (props->evt_ring_hdl >= gsi_ctx->max_ev) {
- GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (atomic_read(
- &gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
- gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
- gsi_ctx->evtr[props->evt_ring_hdl].chan->props.prot !=
- GSI_CHAN_PROT_GCI) {
- GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
- props->evt_ring_hdl, chan_hdl);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- }
- ctx = &gsi_ctx->chan[props->ch_id];
- if (ctx->allocated) {
- GSIERR("chan %d already allocated\n", props->ch_id);
- return -GSI_STATUS_NODEV;
- }
- memset(ctx, 0, sizeof(*ctx));
- /* the user_data array is not required for IPA-offloaded WDI channels */
- if (props->prot != GSI_CHAN_PROT_WDI2 &&
- props->prot != GSI_CHAN_PROT_WDI3)
- user_data_size = props->ring_len / props->re_size;
- else
- user_data_size = props->re_size;
- /*
- * GCI channels may have up to GSI_VEID_MAX out-of-order event
- * completions, so user_data must be large enough to accommodate them.
- * TODO: increase the user data size if GSI_VEID_MAX is not enough.
- */
- if (props->prot == GSI_CHAN_PROT_GCI)
- user_data_size += GSI_VEID_MAX;
- user_data = devm_kzalloc(gsi_ctx->dev,
- user_data_size * sizeof(*user_data),
- GFP_KERNEL);
- if (user_data == NULL) {
- GSIERR("context not allocated\n");
- return -GSI_STATUS_RES_ALLOC_FAILURE;
- }
- mutex_init(&ctx->mlock);
- init_completion(&ctx->compl);
- atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
- ctx->props = *props;
- if (gsi_ctx->per.ver != GSI_VER_2_2) {
- mutex_lock(&gsi_ctx->mlock);
- ee = gsi_ctx->per.ee;
- gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
- val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
- GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_CMD_OFFS(ee));
- res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
- if (res == 0) {
- GSIERR("chan_hdl=%u timed out\n", props->ch_id);
- mutex_unlock(&gsi_ctx->mlock);
- devm_kfree(gsi_ctx->dev, user_data);
- return -GSI_STATUS_TIMED_OUT;
- }
- if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
- GSIERR("chan_hdl=%u allocation failed state=%d\n",
- props->ch_id, ctx->state);
- mutex_unlock(&gsi_ctx->mlock);
- devm_kfree(gsi_ctx->dev, user_data);
- return -GSI_STATUS_RES_ALLOC_FAILURE;
- }
- mutex_unlock(&gsi_ctx->mlock);
- } else {
- mutex_lock(&gsi_ctx->mlock);
- ctx->state = GSI_CHAN_STATE_ALLOCATED;
- mutex_unlock(&gsi_ctx->mlock);
- }
- erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
- GSI_NO_EVT_ERINDEX;
- if (erindex != GSI_NO_EVT_ERINDEX && erindex >= GSI_EVT_RING_MAX) {
- GSIERR("invalid erindex %u\n", erindex);
- devm_kfree(gsi_ctx->dev, user_data);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (erindex < GSI_EVT_RING_MAX) {
- ctx->evtr = &gsi_ctx->evtr[erindex];
- if (props->prot != GSI_CHAN_PROT_GCI)
- atomic_inc(&ctx->evtr->chan_ref_cnt);
- if (props->prot != GSI_CHAN_PROT_GCI &&
- ctx->evtr->props.exclusive &&
- atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
- ctx->evtr->chan = ctx;
- }
- gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
- spin_lock_init(&ctx->ring.slock);
- gsi_init_chan_ring(props, &ctx->ring);
- if (!props->max_re_expected)
- ctx->props.max_re_expected = ctx->ring.max_num_elem;
- ctx->user_data = user_data;
- *chan_hdl = props->ch_id;
- ctx->allocated = true;
- ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
- atomic_inc(&gsi_ctx->num_chan);
- if (props->prot == GSI_CHAN_PROT_GCI) {
- gsi_ctx->coal_info.ch_id = props->ch_id;
- gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
- }
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_alloc_channel);
- static int gsi_alloc_ap_channel(unsigned int chan_hdl)
- {
- struct gsi_chan_ctx *ctx;
- uint32_t val;
- int res;
- int ee;
- enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->allocated) {
- GSIERR("chan %d already allocated\n", chan_hdl);
- return -GSI_STATUS_NODEV;
- }
- memset(ctx, 0, sizeof(*ctx));
- mutex_init(&ctx->mlock);
- init_completion(&ctx->compl);
- atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
- mutex_lock(&gsi_ctx->mlock);
- ee = gsi_ctx->per.ee;
- gsi_ctx->ch_dbg[chan_hdl].ch_allocate++;
- val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
- GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_CMD_OFFS(ee));
- res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
- if (res == 0) {
- GSIERR("chan_hdl=%u timed out\n", chan_hdl);
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_TIMED_OUT;
- }
- if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
- GSIERR("chan_hdl=%u allocation failed state=%d\n",
- chan_hdl, ctx->state);
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_RES_ALLOC_FAILURE;
- }
- mutex_unlock(&gsi_ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
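- /*
- * The channel scratch area is four 32-bit registers (SCRATCH_0..3);
- * the helpers below write and read it as a whole or touch individual
- * words for the WDI variants.
- */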
- static void __gsi_write_channel_scratch(unsigned long chan_hdl,
- union __packed gsi_channel_scratch val)
- {
- gsi_writel(val.data.word1, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- gsi_writel(val.data.word2, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- gsi_writel(val.data.word3, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- gsi_writel(val.data.word4, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- }
- static void __gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
- union __packed gsi_wdi3_channel_scratch2_reg val)
- {
- gsi_writel(val.data.word1, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- }
- int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
- union __packed gsi_wdi_channel_scratch3_reg val)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- mutex_lock(&ctx->mlock);
- ctx->scratch.wdi.endp_metadatareg_offset =
- val.wdi.endp_metadatareg_offset;
- ctx->scratch.wdi.qmap_id = val.wdi.qmap_id;
- gsi_writel(val.data.word1, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);
- int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
- union __packed gsi_wdi2_channel_scratch2_reg val)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- mutex_lock(&ctx->mlock);
- ctx->scratch.wdi2_new.endp_metadatareg_offset =
- val.wdi.endp_metadatareg_offset;
- ctx->scratch.wdi2_new.qmap_id = val.wdi.qmap_id;
- val.wdi.update_ri_moderation_threshold =
- ctx->scratch.wdi2_new.update_ri_moderation_threshold;
- gsi_writel(val.data.word1, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_write_channel_scratch2_reg);
- static void __gsi_read_channel_scratch(unsigned long chan_hdl,
- union __packed gsi_channel_scratch * val)
- {
- val->data.word1 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- val->data.word2 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- val->data.word3 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- val->data.word4 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- }
- static void __gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
- union __packed gsi_wdi3_channel_scratch2_reg * val)
- {
- val->data.word1 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- }
- int gsi_write_channel_scratch(unsigned long chan_hdl,
- union __packed gsi_channel_scratch val)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
- gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
- GSIERR("bad state %d\n",
- gsi_ctx->chan[chan_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- mutex_lock(&ctx->mlock);
- ctx->scratch = val;
- __gsi_write_channel_scratch(chan_hdl, val);
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_write_channel_scratch);
- int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
- union __packed gsi_wdi3_channel_scratch2_reg val)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
- gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
- gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
- GSIERR("bad state %d\n",
- gsi_ctx->chan[chan_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- mutex_lock(&ctx->mlock);
- ctx->scratch.data.word3 = val.data.word1;
- __gsi_write_wdi3_channel_scratch2_reg(chan_hdl, val);
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_write_wdi3_channel_scratch2_reg);
- int gsi_read_channel_scratch(unsigned long chan_hdl,
- union __packed gsi_channel_scratch *val)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
- gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
- gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
- GSIERR("bad state %d\n",
- gsi_ctx->chan[chan_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- mutex_lock(&ctx->mlock);
- __gsi_read_channel_scratch(chan_hdl, val);
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_read_channel_scratch);
- int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
- union __packed gsi_wdi3_channel_scratch2_reg * val)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
- gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
- gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
- GSIERR("bad state %d\n",
- gsi_ctx->chan[chan_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- mutex_lock(&ctx->mlock);
- __gsi_read_wdi3_channel_scratch2_reg(chan_hdl, val);
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_read_wdi3_channel_scratch2_reg);
- int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
- struct __packed gsi_mhi_channel_scratch mscr)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
- gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
- GSIERR("bad state %d\n",
- gsi_ctx->chan[chan_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- mutex_lock(&ctx->mlock);
- ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr);
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_update_mhi_channel_scratch);
- int gsi_query_channel_db_addr(unsigned long chan_hdl,
- uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
- {
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!db_addr_wp_msb || !db_addr_wp_lsb) {
- GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
- db_addr_wp_lsb);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
- GSIERR("bad state %d\n",
- gsi_ctx->chan[chan_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- *db_addr_wp_lsb = gsi_ctx->per.phys_addr +
- GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee);
- *db_addr_wp_msb = gsi_ctx->per.phys_addr +
- GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_query_channel_db_addr);
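- /*
- * gsi_start_channel() - issue the CH_START command and wait for the
- * channel to reach the STARTED state, then restore the write-pointer
- * MSB doorbell register for the channel.
- */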
- int gsi_start_channel(unsigned long chan_hdl)
- {
- enum gsi_ch_cmd_opcode op = GSI_CH_START;
- uint32_t val;
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
- ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
- ctx->state != GSI_CHAN_STATE_STOPPED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&gsi_ctx->mlock);
- reinit_completion(&ctx->compl);
- /* check if INTSET is in IRQ mode for GPI channel */
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
- if (ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
- val != GSI_INTR_IRQ) {
- GSIERR("GSI_EE_n_CNTXT_INTSET_OFFS %d\n", val);
- BUG();
- }
- gsi_ctx->ch_dbg[chan_hdl].ch_start++;
- val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
- GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
- GSIDBG("GSI Channel Start, waiting for completion\n");
- gsi_channel_state_change_wait(chan_hdl,
- ctx,
- GSI_START_CMD_TIMEOUT_MS, op);
- if (ctx->state != GSI_CHAN_STATE_STARTED) {
- /*
- * The start command timed out or the hardware left the channel
- * in an unexpected state.
- */
- GSIERR("chan=%lu timed out, unexpected state=%u\n",
- chan_hdl, ctx->state);
- GSI_ASSERT();
- }
- GSIDBG("GSI Channel=%lu Start success\n", chan_hdl);
- /* write order MUST be MSB followed by LSB */
- val = ((ctx->ring.wp_local >> 32) &
- GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
- GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
- gsi_ctx->per.ee));
- mutex_unlock(&gsi_ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_start_channel);
- int gsi_stop_channel(unsigned long chan_hdl)
- {
- enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
- int res;
- uint32_t val;
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state == GSI_CHAN_STATE_STOPPED) {
- GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
- return GSI_STATUS_SUCCESS;
- }
- if (ctx->state != GSI_CHAN_STATE_STARTED &&
- ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
- ctx->state != GSI_CHAN_STATE_ERROR) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&gsi_ctx->mlock);
- reinit_completion(&ctx->compl);
- /* check if INTSET is in IRQ mode for GPI channel */
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
- if (ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
- val != GSI_INTR_IRQ) {
- GSIERR("GSI_EE_n_CNTXT_INTSET_OFFS %d\n", val);
- BUG();
- }
- gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
- val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
- GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
- GSIDBG("GSI Channel Stop, waiting for completion\n");
- gsi_channel_state_change_wait(chan_hdl,
- ctx,
- GSI_STOP_CMD_TIMEOUT_MS, op);
- if (ctx->state != GSI_CHAN_STATE_STOPPED &&
- ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
- GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
- res = -GSI_STATUS_BAD_STATE;
- BUG();
- goto free_lock;
- }
- if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
- GSIERR("chan=%lu busy try again\n", chan_hdl);
- res = -GSI_STATUS_AGAIN;
- goto free_lock;
- }
- res = GSI_STATUS_SUCCESS;
- free_lock:
- mutex_unlock(&gsi_ctx->mlock);
- return res;
- }
- EXPORT_SYMBOL(gsi_stop_channel);
- int gsi_stop_db_channel(unsigned long chan_hdl)
- {
- enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
- int res;
- uint32_t val;
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state == GSI_CHAN_STATE_STOPPED) {
- GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
- return GSI_STATUS_SUCCESS;
- }
- if (ctx->state != GSI_CHAN_STATE_STARTED &&
- ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&gsi_ctx->mlock);
- reinit_completion(&ctx->compl);
- gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
- val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
- GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
- res = wait_for_completion_timeout(&ctx->compl,
- msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
- if (res == 0) {
- GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
- res = -GSI_STATUS_TIMED_OUT;
- goto free_lock;
- }
- if (ctx->state != GSI_CHAN_STATE_STOPPED &&
- ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
- GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
- res = -GSI_STATUS_BAD_STATE;
- goto free_lock;
- }
- if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
- GSIERR("chan=%lu busy try again\n", chan_hdl);
- res = -GSI_STATUS_AGAIN;
- goto free_lock;
- }
- res = GSI_STATUS_SUCCESS;
- free_lock:
- mutex_unlock(&gsi_ctx->mlock);
- return res;
- }
- EXPORT_SYMBOL(gsi_stop_db_channel);
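- /*
- * gsi_reset_channel() - issue the CH_RESET command, re-checking the
- * channel state with a short sleep if the hardware has not yet
- * reported ALLOCATED, repeat the reset once for producer channels on
- * pre-2.0 hardware, clean up any outstanding user data, and then
- * reprogram the channel context, ring state and scratch.
- */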
- int gsi_reset_channel(unsigned long chan_hdl)
- {
- enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
- int res;
- uint32_t val;
- struct gsi_chan_ctx *ctx;
- bool reset_done = false;
- uint32_t retry_cnt = 0;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- /*
- * In the WDI3 case, if SAP is enabled but no client is connected,
- * the channel remains in the ALLOCATED state. When SAP is disabled,
- * gsi_reset_channel() is called and the reset must still be performed.
- */
- if (ctx->state != GSI_CHAN_STATE_STOPPED &&
- ctx->state != GSI_CHAN_STATE_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&gsi_ctx->mlock);
- reset:
- reinit_completion(&ctx->compl);
- gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
- val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
- GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
- res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
- if (res == 0) {
- GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_TIMED_OUT;
- }
- revrfy_chnlstate:
- if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
- GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
- ctx->state);
- /*
- * The GSI hardware register state may not yet be in sync with
- * the channel context state; wait briefly and re-check.
- */
- retry_cnt++;
- if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
- usleep_range(GSI_RESET_WA_MIN_SLEEP,
- GSI_RESET_WA_MAX_SLEEP);
- goto revrfy_chnlstate;
- }
- /*
- * The channel is still not in the ALLOCATED state after retrying,
- * which is an unexpected hardware state.
- */
- GSI_ASSERT();
- }
- /* the hardware issue is fixed from GSI 2.0 onwards, so the workaround is not needed */
- if (gsi_ctx->per.ver >= GSI_VER_2_0)
- reset_done = true;
- /* workaround: reset GSI producers again */
- if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
- usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
- reset_done = true;
- goto reset;
- }
- if (ctx->props.cleanup_cb)
- gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);
- gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
- ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
- gsi_init_chan_ring(&ctx->props, &ctx->ring);
- /* restore scratch */
- __gsi_write_channel_scratch(chan_hdl, ctx->scratch);
- mutex_unlock(&gsi_ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_reset_channel);
- int gsi_dealloc_channel(unsigned long chan_hdl)
- {
- enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
- int res;
- uint32_t val;
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- /* channel de-allocation is not supported in GSI_VER_2_2 */
- if (gsi_ctx->per.ver != GSI_VER_2_2) {
- mutex_lock(&gsi_ctx->mlock);
- reinit_completion(&ctx->compl);
- gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
- val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
- GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
- ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
- res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
- if (res == 0) {
- GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
- mutex_unlock(&gsi_ctx->mlock);
- return -GSI_STATUS_TIMED_OUT;
- }
- if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
- GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
- ctx->state);
- /* Hardware did not move the channel to NOT_ALLOCATED */
- GSI_ASSERT();
- }
- mutex_unlock(&gsi_ctx->mlock);
- } else {
- mutex_lock(&gsi_ctx->mlock);
- GSIDBG("In GSI_VER_2_2 channel deallocation not supported\n");
- ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED;
- GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl,
- ctx->state);
- mutex_unlock(&gsi_ctx->mlock);
- }
- devm_kfree(gsi_ctx->dev, ctx->user_data);
- ctx->allocated = false;
- if (ctx->evtr && (ctx->props.prot != GSI_CHAN_PROT_GCI))
- atomic_dec(&ctx->evtr->chan_ref_cnt);
- atomic_dec(&gsi_ctx->num_chan);
- if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
- gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
- gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
- }
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_dealloc_channel);
- void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
- {
- unsigned long now = jiffies_to_msecs(jiffies);
- unsigned long elapsed;
- if (used == 0) {
- elapsed = now - ctx->stats.dp.last_timestamp;
- if (ctx->stats.dp.empty_time < elapsed)
- ctx->stats.dp.empty_time = elapsed;
- }
- if (used <= ctx->props.max_re_expected / 3)
- ++ctx->stats.dp.ch_below_lo;
- else if (used <= 2 * ctx->props.max_re_expected / 3)
- ++ctx->stats.dp.ch_below_hi;
- else
- ++ctx->stats.dp.ch_above_hi;
- ctx->stats.dp.last_timestamp = now;
- }
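- /*
- * __gsi_query_channel_free_re() - compute the number of free ring
- * elements on a GPI channel from the distance between the read
- * pointer (hardware value, or the locally cached one when an event
- * ring is attached) and the local write pointer, accounting for
- * wrap-around.
- */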
- static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
- uint16_t *num_free_re)
- {
- uint16_t start;
- uint16_t end;
- uint64_t rp;
- int ee = gsi_ctx->per.ee;
- uint16_t used;
- WARN_ON(ctx->props.prot != GSI_CHAN_PROT_GPI);
- if (!ctx->evtr) {
- rp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
- rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
- ctx->ring.rp = rp;
- } else {
- rp = ctx->ring.rp_local;
- }
- start = gsi_find_idx_from_addr(&ctx->ring, rp);
- end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
- if (end >= start)
- used = end - start;
- else
- used = ctx->ring.max_num_elem + 1 - (start - end);
- *num_free_re = ctx->ring.max_num_elem - used;
- }
- int gsi_query_channel_info(unsigned long chan_hdl,
- struct gsi_chan_info *info)
- {
- struct gsi_chan_ctx *ctx;
- spinlock_t *slock;
- unsigned long flags;
- uint64_t rp;
- uint64_t wp;
- int ee;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch || !info) {
- GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->evtr) {
- slock = &ctx->evtr->ring.slock;
- info->evt_valid = true;
- } else {
- slock = &ctx->ring.slock;
- info->evt_valid = false;
- }
- spin_lock_irqsave(slock, flags);
- ee = gsi_ctx->per.ee;
- rp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
- rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
- ctx->ring.rp = rp;
- info->rp = rp;
- wp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
- wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
- ctx->ring.wp = wp;
- info->wp = wp;
- if (info->evt_valid) {
- rp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
- rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee)))
- << 32;
- info->evt_rp = rp;
- wp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
- wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee)))
- << 32;
- info->evt_wp = wp;
- }
- spin_unlock_irqrestore(slock, flags);
- GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
- chan_hdl, info->rp, info->wp,
- info->evt_valid, info->evt_rp, info->evt_wp);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_query_channel_info);
- int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
- {
- struct gsi_chan_ctx *ctx;
- spinlock_t *slock;
- unsigned long flags;
- uint64_t rp;
- uint64_t wp;
- uint64_t rp_local;
- int ee;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
- GSIERR("bad params chan_hdl=%lu is_empty=%pK\n",
- chan_hdl, is_empty);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- ee = gsi_ctx->per.ee;
- if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ctx->props.prot != GSI_CHAN_PROT_GCI) {
- GSIERR("op not supported for protocol %u\n", ctx->props.prot);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (ctx->evtr)
- slock = &ctx->evtr->ring.slock;
- else
- slock = &ctx->ring.slock;
- spin_lock_irqsave(slock, flags);
- if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr) {
- rp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
- rp |= ctx->evtr->ring.rp & 0xFFFFFFFF00000000;
- ctx->evtr->ring.rp = rp;
- wp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
- wp |= ctx->evtr->ring.wp & 0xFFFFFFFF00000000;
- ctx->evtr->ring.wp = wp;
- rp_local = ctx->evtr->ring.rp_local;
- } else {
- rp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
- rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
- ctx->ring.rp = rp;
- wp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
- wp |= ctx->ring.wp & 0xFFFFFFFF00000000;
- ctx->ring.wp = wp;
- rp_local = ctx->ring.rp_local;
- }
- if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
- *is_empty = (rp_local == rp) ? true : false;
- else
- *is_empty = (wp == rp) ? true : false;
- spin_unlock_irqrestore(slock, flags);
- if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr)
- GSIDBG("ch=%ld ev=%d RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
- chan_hdl, ctx->evtr->id, rp, wp, rp_local);
- else
- GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
- chan_hdl, rp, wp, rp_local);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_is_channel_empty);
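- /*
- * Illustrative caller-side sketch (not part of this driver): a client that
- * owns a GPI channel could use gsi_is_channel_empty() to wait for the HW
- * to drain before stopping the channel. The retry bound and sleep range
- * below are hypothetical values, not taken from this file.
- *
- * bool empty = false;
- * int retries = 10;
- *
- * while (retries--) {
- *     if (gsi_is_channel_empty(chan_hdl, &empty) || empty)
- *         break;
- *     usleep_range(1000, 2000);
- * }
- * gsi_stop_channel(chan_hdl);
- */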
- int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
- {
- int i;
- int end;
- if (!ctx->user_data[idx].valid) {
- ctx->user_data[idx].valid = true;
- return idx;
- }
- /*
- * at this point we need to find an "escape buffer" for the cookie
- * as the userdata in this spot is in use. This happens if the TRE at
- * idx is not completed yet and it is getting reused by a new TRE.
- */
- ctx->stats.userdata_in_use++;
- end = ctx->ring.max_num_elem + 1;
- for (i = 0; i < GSI_VEID_MAX; i++) {
- if (!ctx->user_data[end + i].valid) {
- ctx->user_data[end + i].valid = true;
- return end + i;
- }
- }
- /* Go over original userdata when escape buffer is full (costly) */
- GSIDBG("escape buffer is full\n");
- for (i = 0; i < end; i++) {
- if (!ctx->user_data[i].valid) {
- ctx->user_data[i].valid = true;
- return i;
- }
- }
- /* Everything is full (possibly a stall) */
- GSIERR("both userdata array and escape buffer is full\n");
- BUG();
- return 0xFFFF;
- }
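- /*
- * Layout assumed by the cookie lookup above (a sketch inferred from the
- * indexing, not a definition stated in this file): user_data[] holds one
- * slot per ring element plus a small escape area for completions that are
- * still outstanding when their TRE slot gets reused.
- *
- * [0 .. max_num_elem]                                per-TRE cookie slots
- * [max_num_elem + 1 .. max_num_elem + GSI_VEID_MAX]  escape buffer
- *
- * so the backing array is expected to hold at least
- * (max_num_elem + 1 + GSI_VEID_MAX) entries.
- */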
- int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
- struct gsi_xfer_elem *xfer)
- {
- struct gsi_gci_tre gci_tre;
- struct gsi_gci_tre *tre_gci_ptr;
- uint16_t idx;
- memset(&gci_tre, 0, sizeof(gci_tre));
- if (xfer->addr & 0xFFFFFF0000000000) {
- GSIERR("chan_hdl=%u add too large=%llx\n",
- ctx->props.ch_id, xfer->addr);
- return -EINVAL;
- }
- if (xfer->type != GSI_XFER_ELEM_DATA) {
- GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
- xfer->type);
- return -EINVAL;
- }
- idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
- tre_gci_ptr = (struct gsi_gci_tre *)(ctx->ring.base_va +
- idx * ctx->ring.elem_sz);
- gci_tre.buffer_ptr = xfer->addr;
- gci_tre.buf_len = xfer->len;
- gci_tre.re_type = GSI_RE_COAL;
- gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
- if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
- return -EPERM;
- /* write the TRE to ring */
- *tre_gci_ptr = gci_tre;
- ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;
- return 0;
- }
- int __gsi_populate_tre(struct gsi_chan_ctx *ctx,
- struct gsi_xfer_elem *xfer)
- {
- struct gsi_tre tre;
- struct gsi_tre *tre_ptr;
- uint16_t idx;
- memset(&tre, 0, sizeof(tre));
- tre.buffer_ptr = xfer->addr;
- tre.buf_len = xfer->len;
- if (xfer->type == GSI_XFER_ELEM_DATA) {
- tre.re_type = GSI_RE_XFER;
- } else if (xfer->type == GSI_XFER_ELEM_IMME_CMD) {
- tre.re_type = GSI_RE_IMMD_CMD;
- } else if (xfer->type == GSI_XFER_ELEM_NOP) {
- tre.re_type = GSI_RE_NOP;
- } else {
- GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
- xfer->type);
- return -EINVAL;
- }
- tre.bei = (xfer->flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
- tre.ieot = (xfer->flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
- tre.ieob = (xfer->flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
- tre.chain = (xfer->flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;
- idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
- tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
- idx * ctx->ring.elem_sz);
- /* write the TRE to ring */
- *tre_ptr = tre;
- ctx->user_data[idx].valid = true;
- ctx->user_data[idx].p = xfer->xfer_user_data;
- return 0;
- }
- int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
- struct gsi_xfer_elem *xfer, bool ring_db)
- {
- struct gsi_chan_ctx *ctx;
- uint16_t free;
- uint64_t wp_rollback;
- int i;
- spinlock_t *slock;
- unsigned long flags;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
- GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
- chan_hdl, num_xfers, xfer);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (unlikely(gsi_ctx->chan[chan_hdl].state
- == GSI_CHAN_STATE_NOT_ALLOCATED)) {
- GSIERR("bad state %d\n",
- gsi_ctx->chan[chan_hdl].state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ctx->props.prot != GSI_CHAN_PROT_GCI) {
- GSIERR("op not supported for protocol %u\n", ctx->props.prot);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (ctx->evtr)
- slock = &ctx->evtr->ring.slock;
- else
- slock = &ctx->ring.slock;
- spin_lock_irqsave(slock, flags);
- /* no transfers to queue; just ring the doorbell if requested */
- if (!num_xfers)
- goto ring_doorbell;
- /*
- * For GCI channels the responsibility is on the caller to make sure
- * there is enough room in the TRE ring.
- */
- if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
- __gsi_query_channel_free_re(ctx, &free);
- if (num_xfers > free) {
- GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
- chan_hdl, num_xfers, free);
- spin_unlock_irqrestore(slock, flags);
- return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
- }
- }
- wp_rollback = ctx->ring.wp_local;
- for (i = 0; i < num_xfers; i++) {
- if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
- if (__gsi_populate_gci_tre(ctx, &xfer[i]))
- break;
- } else {
- if (__gsi_populate_tre(ctx, &xfer[i]))
- break;
- }
- gsi_incr_ring_wp(&ctx->ring);
- }
- if (i != num_xfers) {
- /* reject all the xfers */
- ctx->ring.wp_local = wp_rollback;
- spin_unlock_irqrestore(slock, flags);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx->stats.queued += num_xfers;
- ring_doorbell:
- if (ring_db) {
- /* ensure TRE is set before ringing doorbell */
- wmb();
- gsi_ring_chan_doorbell(ctx);
- }
- spin_unlock_irqrestore(slock, flags);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_queue_xfer);
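- /*
- * Illustrative caller-side sketch (not part of this driver): queueing one
- * data TRE on a GPI channel and ringing the doorbell in the same call.
- * dma_addr, buf_len and my_cookie are hypothetical caller-owned values.
- *
- * struct gsi_xfer_elem xfer;
- * int ret;
- *
- * memset(&xfer, 0, sizeof(xfer));
- * xfer.addr = dma_addr;
- * xfer.len = buf_len;
- * xfer.type = GSI_XFER_ELEM_DATA;
- * xfer.flags = GSI_XFER_FLAG_EOT;
- * xfer.xfer_user_data = my_cookie;
- *
- * ret = gsi_queue_xfer(chan_hdl, 1, &xfer, true);
- * if (ret == -GSI_STATUS_RING_INSUFFICIENT_SPACE)
- *     ... back off and retry once completions free up room ...
- */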
- int gsi_start_xfer(unsigned long chan_hdl)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ctx->props.prot != GSI_CHAN_PROT_GCI) {
- GSIERR("op not supported for protocol %u\n", ctx->props.prot);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (ctx->ring.wp == ctx->ring.wp_local)
- return GSI_STATUS_SUCCESS;
- gsi_ring_chan_doorbell(ctx);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_start_xfer);
- int gsi_poll_channel(unsigned long chan_hdl,
- struct gsi_chan_xfer_notify *notify)
- {
- int unused_var;
- return gsi_poll_n_channel(chan_hdl, notify, 1, &unused_var);
- }
- EXPORT_SYMBOL(gsi_poll_channel);
- int gsi_poll_n_channel(unsigned long chan_hdl,
- struct gsi_chan_xfer_notify *notify,
- int expected_num, int *actual_num)
- {
- struct gsi_chan_ctx *ctx;
- uint64_t rp;
- int ee;
- int i;
- unsigned long flags;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch || !notify ||
- !actual_num || expected_num <= 0) {
- GSIERR("bad params chan_hdl=%lu notify=%pK\n",
- chan_hdl, notify);
- GSIERR("actual_num=%pK expected_num=%d\n",
- actual_num, expected_num);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- ee = gsi_ctx->per.ee;
- if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ctx->props.prot != GSI_CHAN_PROT_GCI) {
- GSIERR("op not supported for protocol %u\n", ctx->props.prot);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (!ctx->evtr) {
- GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
- if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
- /* update rp to see if we have anything new to process */
- rp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
- rp |= ctx->evtr->ring.rp & 0xFFFFFFFF00000000ULL;
- ctx->evtr->ring.rp = rp;
- /* read gsi event ring rp again if last read is empty */
- if (rp == ctx->evtr->ring.rp_local) {
- /* event ring is empty */
- gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
- /* do another read to close a small window */
- __iowmb();
- rp = gsi_readl(gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(
- ctx->evtr->id, ee));
- rp |= ctx->evtr->ring.rp & 0xFFFFFFFF00000000ULL;
- ctx->evtr->ring.rp = rp;
- if (rp == ctx->evtr->ring.rp_local) {
- spin_unlock_irqrestore(
- &ctx->evtr->ring.slock,
- flags);
- ctx->stats.poll_empty++;
- return GSI_STATUS_POLL_EMPTY;
- }
- }
- }
- *actual_num = gsi_get_complete_num(&ctx->evtr->ring,
- ctx->evtr->ring.rp_local, ctx->evtr->ring.rp);
- if (*actual_num > expected_num)
- *actual_num = expected_num;
- for (i = 0; i < *actual_num; i++)
- gsi_process_evt_re(ctx->evtr, notify + i, false);
- spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
- ctx->stats.poll_ok++;
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_poll_n_channel);
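- /*
- * Illustrative polling sketch (not part of this driver): once a channel is
- * in poll mode, a client could drain completions in batches until the
- * event ring is empty. The batch size of 8 is a hypothetical value.
- *
- * struct gsi_chan_xfer_notify notify[8];
- * int actual, i, ret;
- *
- * do {
- *     ret = gsi_poll_n_channel(chan_hdl, notify, 8, &actual);
- *     if (ret != GSI_STATUS_SUCCESS)
- *         break;    // GSI_STATUS_POLL_EMPTY means nothing left
- *     for (i = 0; i < actual; i++)
- *         ... consume notify[i] ...
- * } while (actual == 8);
- */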
- int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
- {
- struct gsi_chan_ctx *ctx, *coal_ctx;
- enum gsi_chan_mode curr;
- unsigned long flags;
- enum gsi_chan_mode chan_mode;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ctx->props.prot != GSI_CHAN_PROT_GCI) {
- GSIERR("op not supported for protocol %u\n", ctx->props.prot);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (!ctx->evtr || !ctx->evtr->props.exclusive) {
- GSIERR("cannot configure mode on chan_hdl=%lu\n",
- chan_hdl);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (atomic_read(&ctx->poll_mode))
- curr = GSI_CHAN_MODE_POLL;
- else
- curr = GSI_CHAN_MODE_CALLBACK;
- if (mode == curr) {
- GSIDBG("already in requested mode %u chan_hdl=%lu\n",
- curr, chan_hdl);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- spin_lock_irqsave(&gsi_ctx->slock, flags);
- if (curr == GSI_CHAN_MODE_CALLBACK &&
- mode == GSI_CHAN_MODE_POLL) {
- __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
- gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
- atomic_set(&ctx->poll_mode, mode);
- if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
- atomic_set(&ctx->evtr->chan->poll_mode, mode);
- } else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
- coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
- if (coal_ctx != NULL)
- atomic_set(&coal_ctx->poll_mode, mode);
- }
- GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
- ctx->evtr->id, mode);
- ctx->stats.callback_to_poll++;
- }
- if (curr == GSI_CHAN_MODE_POLL &&
- mode == GSI_CHAN_MODE_CALLBACK) {
- atomic_set(&ctx->poll_mode, mode);
- if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
- atomic_set(&ctx->evtr->chan->poll_mode, mode);
- } else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
- coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
- if (coal_ctx != NULL)
- atomic_set(&coal_ctx->poll_mode, mode);
- }
- __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
- GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
- ctx->evtr->id, mode);
- /*
- * In GSI 2.2 and 2.5 there is a limitation that can lead
- * to losing an interrupt. For these versions an
- * explicit check is needed after enabling the interrupt
- */
- if ((gsi_ctx->per.ver == GSI_VER_2_2 ||
- gsi_ctx->per.ver == GSI_VER_2_5) &&
- !gsi_ctx->per.skip_ieob_mask_wa) {
- u32 src = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(
- gsi_ctx->per.ee));
- if (src & (1 << ctx->evtr->id)) {
- __gsi_config_ieob_irq(
- gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
- gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(
- gsi_ctx->per.ee));
- spin_unlock_irqrestore(&gsi_ctx->slock, flags);
- spin_lock_irqsave(&ctx->evtr->ring.slock,
- flags);
- chan_mode = atomic_xchg(&ctx->poll_mode,
- GSI_CHAN_MODE_POLL);
- spin_unlock_irqrestore(
- &ctx->evtr->ring.slock, flags);
- ctx->stats.poll_pending_irq++;
- GSIDBG("IEOB WA pnd cnt = %ld prvmode = %d\n",
- ctx->stats.poll_pending_irq,
- chan_mode);
- if (chan_mode == GSI_CHAN_MODE_POLL)
- return GSI_STATUS_SUCCESS;
- else
- return -GSI_STATUS_PENDING_IRQ;
- }
- }
- ctx->stats.poll_to_callback++;
- }
- spin_unlock_irqrestore(&gsi_ctx->slock, flags);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_config_channel_mode);
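- /*
- * Illustrative sketch of the poll <-> callback handshake (not part of this
- * driver): when re-arming callbacks, a -GSI_STATUS_PENDING_IRQ return from
- * the WA above means an event arrived while IEOB was masked and the
- * channel was left in poll mode, so the caller should keep polling.
- *
- * ret = gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_CALLBACK);
- * if (ret == -GSI_STATUS_PENDING_IRQ)
- *     ... drain with gsi_poll_n_channel() and try again later ...
- */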
- int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
- union gsi_channel_scratch *scr)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!props || !scr) {
- GSIERR("bad params props=%pK scr=%pK\n", props, scr);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&ctx->mlock);
- *props = ctx->props;
- *scr = ctx->scratch;
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_get_channel_cfg);
- int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
- union gsi_channel_scratch *scr)
- {
- struct gsi_chan_ctx *ctx;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!props || gsi_validate_channel_props(props)) {
- GSIERR("bad params props=%pK\n", props);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (chan_hdl >= gsi_ctx->max_ch) {
- GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
- GSIERR("bad state %d\n", ctx->state);
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- if (ctx->props.ch_id != props->ch_id ||
- ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
- GSIERR("changing immutable fields not supported\n");
- return -GSI_STATUS_UNSUPPORTED_OP;
- }
- mutex_lock(&ctx->mlock);
- ctx->props = *props;
- if (scr)
- ctx->scratch = *scr;
- gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
- ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
- gsi_init_chan_ring(&ctx->props, &ctx->ring);
- /* restore scratch */
- __gsi_write_channel_scratch(chan_hdl, ctx->scratch);
- mutex_unlock(&ctx->mlock);
- return GSI_STATUS_SUCCESS;
- }
- EXPORT_SYMBOL(gsi_set_channel_cfg);
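- /*
- * Illustrative read-modify-write sketch (not part of this driver): since
- * gsi_set_channel_cfg() rejects changes to ch_id and evt_ring_hdl, a
- * caller would normally start from the current configuration. low_weight
- * is only an example field assumed to exist in gsi_chan_props.
- *
- * struct gsi_chan_props props;
- * union gsi_channel_scratch scr;
- *
- * if (!gsi_get_channel_cfg(chan_hdl, &props, &scr)) {
- *     props.low_weight = 1;
- *     gsi_set_channel_cfg(chan_hdl, &props, &scr);
- * }
- */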
- static void gsi_configure_ieps(void *base, enum gsi_ver ver)
- {
- void __iomem *gsi_base = (void __iomem *) base;
- gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
- gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
- gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
- gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
- gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
- gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
- gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS);
- gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
- gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
- gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
- gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
- gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
- gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
- gsi_writel(14, gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS);
- gsi_writel(15, gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS);
- gsi_writel(16, gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS);
- if (ver >= GSI_VER_2_5)
- gsi_writel(17,
- gsi_base + GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS);
- }
- static void gsi_configure_bck_prs_matrix(void *base)
- {
- void __iomem *gsi_base = (void __iomem *) base;
- /*
- * For now, these are default values. In the future, the GSI FW image
- * will provide optimized back-pressure values tuned for that image.
- */
- gsi_writel(0xfffffffe,
- gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS);
- gsi_writel(0xffffffff,
- gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS);
- gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS);
- gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS);
- gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS);
- gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS);
- gsi_writel(0xffffefff,
- gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS);
- gsi_writel(0xffffffff,
- gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS);
- gsi_writel(0x00000000,
- gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
- gsi_writel(0x00000000,
- gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
- gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
- gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
- gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
- gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
- gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
- gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
- gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS);
- gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS);
- gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS);
- gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS);
- gsi_writel(0xffffffff,
- gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS);
- gsi_writel(0xff03ffff,
- gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS);
- }
- int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver)
- {
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!gsi_ctx->base) {
- GSIERR("access to GSI HW has not been mapped\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
- GSIERR("Incorrect version %d\n", ver);
- return -GSI_STATUS_ERROR;
- }
- gsi_writel(0, gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS);
- gsi_writel(per_base_addr,
- gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS);
- gsi_configure_bck_prs_matrix((void *)gsi_ctx->base);
- gsi_configure_ieps(gsi_ctx->base, ver);
- return 0;
- }
- EXPORT_SYMBOL(gsi_configure_regs);
- int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
- {
- void __iomem *gsi_base;
- uint32_t value;
- if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
- GSIERR("Incorrect version %d\n", ver);
- return -GSI_STATUS_ERROR;
- }
- gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
- if (!gsi_base) {
- GSIERR("ioremap failed\n");
- return -GSI_STATUS_RES_ALLOC_FAILURE;
- }
- /* Enable the MCS and set to x2 clocks */
- if (ver >= GSI_VER_1_2) {
- value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
- GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
- gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);
- value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
- GSI_GSI_CFG_GSI_ENABLE_BMSK) |
- ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
- GSI_GSI_CFG_MCS_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
- GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
- ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
- GSI_GSI_CFG_UC_IS_MCS_BMSK) |
- ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
- GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
- ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
- GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
- } else {
- value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
- GSI_GSI_CFG_GSI_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
- GSI_GSI_CFG_MCS_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
- GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
- ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
- GSI_GSI_CFG_UC_IS_MCS_BMSK));
- }
- /* GSI frequency is peripheral frequency divided by 3 (2+1) */
- if (ver >= GSI_VER_2_5)
- value |= ((2 << GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT) &
- GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK);
- gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
- iounmap(gsi_base);
- return 0;
- }
- EXPORT_SYMBOL(gsi_enable_fw);
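- /*
- * Illustrative bring-up sketch (an assumption about caller sequencing, not
- * stated in this file): the owner of the GSI HW could enable the MCS via
- * gsi_enable_fw() and then program the peripheral base address and the
- * IEP/back-pressure tables via gsi_configure_regs().
- *
- * if (!gsi_enable_fw(gsi_base_addr, gsi_size, ver))
- *     gsi_configure_regs(per_base_addr, ver);
- */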
- void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
- unsigned long *size, enum gsi_ver ver)
- {
- unsigned long maxn;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return;
- }
- switch (ver) {
- case GSI_VER_1_0:
- case GSI_VER_1_2:
- case GSI_VER_1_3:
- maxn = GSI_GSI_INST_RAM_n_MAXn;
- break;
- case GSI_VER_2_0:
- maxn = GSI_V2_0_GSI_INST_RAM_n_MAXn;
- break;
- case GSI_VER_2_2:
- maxn = GSI_V2_2_GSI_INST_RAM_n_MAXn;
- break;
- case GSI_VER_2_5:
- maxn = GSI_V2_5_GSI_INST_RAM_n_MAXn;
- break;
- case GSI_VER_2_7:
- maxn = GSI_V2_7_GSI_INST_RAM_n_MAXn;
- break;
- case GSI_VER_2_9:
- maxn = GSI_V2_9_GSI_INST_RAM_n_MAXn;
- break;
- case GSI_VER_ERR:
- case GSI_VER_MAX:
- default:
- GSIERR("GSI version is not supported %d\n", ver);
- WARN_ON(1);
- return;
- }
- if (size)
- *size = GSI_GSI_INST_RAM_n_WORD_SZ * (maxn + 1);
- if (base_offset) {
- if (ver < GSI_VER_2_5)
- *base_offset = GSI_GSI_INST_RAM_n_OFFS(0);
- else
- *base_offset = GSI_V2_5_GSI_INST_RAM_n_OFFS(0);
- }
- }
- EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
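- /*
- * Illustrative sketch (not part of this driver): a FW loader could use the
- * returned offset/size to locate the instruction RAM window inside its own
- * mapping of the GSI register space. gsi_reg_base, fw_image and fw_size
- * are hypothetical caller-owned values.
- *
- * unsigned long off, sz;
- *
- * gsi_get_inst_ram_offset_and_size(&off, &sz, ver);
- * memcpy_toio(gsi_reg_base + off, fw_image,
- *     min_t(unsigned long, fw_size, sz));
- */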
- int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
- {
- enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
- uint32_t val;
- int res;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_idx >= gsi_ctx->max_ch || !code) {
- GSIERR("bad params chan_idx=%d\n", chan_idx);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- mutex_lock(&gsi_ctx->mlock);
- reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
- /* invalidate the response */
- gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
- gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
- gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
- gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
- val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
- ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
- GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
- ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
- GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
- res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
- msecs_to_jiffies(GSI_CMD_TIMEOUT));
- if (res == 0) {
- GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
- res = -GSI_STATUS_TIMED_OUT;
- goto free_lock;
- }
- gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
- if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
- GSI_GEN_EE_CMD_RETURN_CODE_RETRY) {
- GSIDBG("chan_idx=%u ee=%u busy try again\n", chan_idx, ee);
- *code = GSI_GEN_EE_CMD_RETURN_CODE_RETRY;
- res = -GSI_STATUS_AGAIN;
- goto free_lock;
- }
- if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
- GSIERR("No response received\n");
- res = -GSI_STATUS_ERROR;
- goto free_lock;
- }
- res = GSI_STATUS_SUCCESS;
- *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
- free_lock:
- mutex_unlock(&gsi_ctx->mlock);
- return res;
- }
- EXPORT_SYMBOL(gsi_halt_channel_ee);
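- /*
- * Illustrative retry sketch (not part of this driver): HALT can come back
- * with GSI_GEN_EE_CMD_RETURN_CODE_RETRY while the channel is busy, so a
- * caller typically retries a bounded number of times. The retry count and
- * delay below are hypothetical values.
- *
- * int ret, code, tries = 5;
- *
- * do {
- *     ret = gsi_halt_channel_ee(chan_idx, ee, &code);
- *     if (ret != -GSI_STATUS_AGAIN)
- *         break;
- *     msleep(20);
- * } while (--tries);
- */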
- int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
- {
- enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL;
- struct gsi_chan_ctx *ctx;
- uint32_t val;
- int res;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (chan_idx >= gsi_ctx->max_ch || !code) {
- GSIERR("bad params chan_idx=%d\n", chan_idx);
- return -GSI_STATUS_INVALID_PARAMS;
- }
- if (ee == 0)
- return gsi_alloc_ap_channel(chan_idx);
- mutex_lock(&gsi_ctx->mlock);
- reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
- /* invalidate the response */
- gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
- gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
- gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
- val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
- GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
- ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
- GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
- ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
- GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
- gsi_writel(val, gsi_ctx->base +
- GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
- res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
- msecs_to_jiffies(GSI_CMD_TIMEOUT));
- if (res == 0) {
- GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
- res = -GSI_STATUS_TIMED_OUT;
- goto free_lock;
- }
- gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
- if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
- GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) {
- GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee);
- *code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES;
- res = -GSI_STATUS_RES_ALLOC_FAILURE;
- goto free_lock;
- }
- if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
- GSIERR("No response received\n");
- res = -GSI_STATUS_ERROR;
- goto free_lock;
- }
- if (ee == 0) {
- ctx = &gsi_ctx->chan[chan_idx];
- gsi_ctx->ch_dbg[chan_idx].ch_allocate++;
- }
- res = GSI_STATUS_SUCCESS;
- *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
- free_lock:
- mutex_unlock(&gsi_ctx->mlock);
- return res;
- }
- EXPORT_SYMBOL(gsi_alloc_channel_ee);
- int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
- {
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return -GSI_STATUS_NODEV;
- }
- if (!gsi_ctx->base) {
- GSIERR("access to GSI HW has not been mapped\n");
- return -GSI_STATUS_INVALID_PARAMS;
- }
- gsi_writel(per_ep_index,
- gsi_ctx->base +
- GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(chan_num, ee));
- return 0;
- }
- EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep);
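- /*
- * Illustrative note (an assumption about intended use, not stated in this
- * file): the VP table entry maps the virtual channel number seen by an EE
- * to a physical peripheral endpoint, e.g.
- *
- * gsi_map_virtual_ch_to_per_ep(ee, chan_num, ipa_ep_idx);
- *
- * where ipa_ep_idx is a hypothetical IPA endpoint index owned by the caller.
- */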
- void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
- uint32_t db_addr_low, uint32_t db_addr_high)
- {
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return;
- }
- if (gsi_ctx->per.ver >= GSI_VER_2_9) {
- gsi_writel(db_addr_low, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_ring_hdl,
- gsi_ctx->per.ee));
- gsi_writel(db_addr_high, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_ring_hdl,
- gsi_ctx->per.ee));
- } else {
- gsi_writel(db_addr_low, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_ring_hdl,
- gsi_ctx->per.ee));
- gsi_writel(db_addr_high, gsi_ctx->base +
- GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_ring_hdl,
- gsi_ctx->per.ee));
- }
- }
- EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
- void gsi_wdi3_dump_register(unsigned long chan_hdl)
- {
- uint32_t val;
- if (!gsi_ctx) {
- pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
- return;
- }
- GSIDBG("reg dump ch id %ld\n", chan_hdl);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_QOS_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_QOS_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS 0x%x\n", val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS 0x%x\n", val);
- }
- EXPORT_SYMBOL(gsi_wdi3_dump_register);
- static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
- unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr)
- {
- union __packed gsi_channel_scratch scr;
- /*
- * The sequence below is not atomic. The assumption is that the
- * sequencer-specific fields will remain unchanged across it.
- */
- /* READ */
- scr.data.word1 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- scr.data.word2 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- scr.data.word3 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- scr.data.word4 = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- /* UPDATE */
- scr.mhi.mhi_host_wp_addr = mscr.mhi_host_wp_addr;
- scr.mhi.assert_bit40 = mscr.assert_bit40;
- scr.mhi.polling_configuration = mscr.polling_configuration;
- scr.mhi.burst_mode_enabled = mscr.burst_mode_enabled;
- scr.mhi.polling_mode = mscr.polling_mode;
- scr.mhi.oob_mod_threshold = mscr.oob_mod_threshold;
- if (gsi_ctx->per.ver < GSI_VER_2_5) {
- scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre;
- scr.mhi.outstanding_threshold = mscr.outstanding_threshold;
- }
- /* WRITE */
- gsi_writel(scr.data.word1, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- gsi_writel(scr.data.word2, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- gsi_writel(scr.data.word3, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- gsi_writel(scr.data.word4, gsi_ctx->base +
- GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
- gsi_ctx->per.ee));
- return scr;
- }
- static int msm_gsi_probe(struct platform_device *pdev)
- {
- struct device *dev = &pdev->dev;
- pr_debug("gsi_probe\n");
- gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
- if (!gsi_ctx) {
- dev_err(dev, "failed to allocated gsi context\n");
- return -ENOMEM;
- }
- gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
- "gsi", 0);
- if (gsi_ctx->ipc_logbuf == NULL)
- GSIERR("failed to create IPC log, continue...\n");
- gsi_ctx->dev = dev;
- init_completion(&gsi_ctx->gen_ee_cmd_compl);
- gsi_debugfs_init();
- return 0;
- }
- static struct platform_driver msm_gsi_driver = {
- .probe = msm_gsi_probe,
- .driver = {
- .name = "gsi",
- .of_match_table = msm_gsi_match,
- },
- };
- static struct platform_device *pdev;
- /*
- * Module init.
- */
- static int __init gsi_init(void)
- {
- int ret;
- pr_debug("%s\n", __func__);
- ret = platform_driver_register(&msm_gsi_driver);
- if (ret < 0)
- goto out;
- if (running_emulation) {
- pdev = platform_device_register_simple("gsi", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
- platform_driver_unregister(&msm_gsi_driver);
- goto out;
- }
- }
- out:
- return ret;
- }
- arch_initcall(gsi_init);
- /*
- * Module exit.
- */
- static void __exit gsi_exit(void)
- {
- if (running_emulation && pdev)
- platform_device_unregister(pdev);
- platform_driver_unregister(&msm_gsi_driver);
- }
- module_exit(gsi_exit);
- MODULE_LICENSE("GPL v2");
- MODULE_DESCRIPTION("Generic Software Interface (GSI)");