// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ipc_logging.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cacheflush.h>
#include <linux/msm_gpi.h>
#include <linux/delay.h>
#include "../dmaengine.h"
#include "../virt-dma.h"
#include "msm_gpi_mmio.h"

/* global logging macros */
#define GPI_LOG(gpi_dev, fmt, ...) do { \
        if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
                dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
        if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
                ipc_log_string(gpi_dev->ilctxt, \
                               "%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)

#define GPI_ERR(gpi_dev, fmt, ...) do { \
        if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
                dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
        if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
                ipc_log_string(gpi_dev->ilctxt, \
                               "%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)

/* gpii specific logging macros */
#define GPII_INFO(gpii, ch, fmt, ...) do { \
        if (gpii->klog_lvl >= LOG_LVL_INFO) \
                pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
                        __func__, ##__VA_ARGS__); \
        if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
                ipc_log_string(gpii->ilctxt, \
                               "ch:%u %s: " fmt, ch, \
                               __func__, ##__VA_ARGS__); \
} while (0)

#define GPII_ERR(gpii, ch, fmt, ...) do { \
        if (gpii->klog_lvl >= LOG_LVL_ERROR) \
                pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
                       __func__, ##__VA_ARGS__); \
        if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
                ipc_log_string(gpii->ilctxt, \
                               "ch:%u %s: " fmt, ch, \
                               __func__, ##__VA_ARGS__); \
} while (0)

#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
        if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
                pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
                       __func__, ##__VA_ARGS__); \
        if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
                ipc_log_string(gpii->ilctxt, \
                               "ch:%u %s: " fmt, ch, \
                               __func__, ##__VA_ARGS__); \
} while (0)

enum DEBUG_LOG_LVL {
        LOG_LVL_MASK_ALL,
        LOG_LVL_CRITICAL,
        LOG_LVL_ERROR,
        LOG_LVL_INFO,
        LOG_LVL_VERBOSE,
        LOG_LVL_REG_ACCESS,
};

enum EV_PRIORITY {
        EV_PRIORITY_ISR,
        EV_PRIORITY_TASKLET,
};

#define GPI_DMA_DRV_NAME "gpi_dma"
#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)

#if IS_ENABLED(CONFIG_MSM_GPI_DMA_DEBUG)
#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
#define CMD_TIMEOUT_MS (1000)
#define GPII_REG(gpii, ch, fmt, ...) do { \
        if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
                pr_info("%s:%u:%s: " fmt, gpii->label, \
                        ch, __func__, ##__VA_ARGS__); \
        if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
                ipc_log_string(gpii->ilctxt, \
                               "ch:%u %s: " fmt, ch, \
                               __func__, ##__VA_ARGS__); \
} while (0)
#define GPII_VERB(gpii, ch, fmt, ...) do { \
        if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
                pr_info("%s:%u:%s: " fmt, gpii->label, \
                        ch, __func__, ##__VA_ARGS__); \
        if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
                ipc_log_string(gpii->ilctxt, \
                               "ch:%u %s: " fmt, ch, \
                               __func__, ##__VA_ARGS__); \
} while (0)
#else
#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
#define CMD_TIMEOUT_MS (250)
/* verbose and register logging are disabled if !debug */
#define GPII_REG(gpii, ch, fmt, ...)
#define GPII_VERB(gpii, ch, fmt, ...)
#endif

#define IPC_LOG_PAGES (2)
#define GPI_LABEL_SIZE (256)
#define GPI_DBG_COMMON (99)
#define MAX_CHANNELS_PER_GPII (2)
#define GPI_TX_CHAN (0)
#define GPI_RX_CHAN (1)
#define STATE_IGNORE (U32_MAX)
#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
#define NOOP_TRE_MASK(link_rx, bei, ieot, ieob, ch) \
        ((0x0 << 20) | (0x0 << 16) | (link_rx << 11) | (bei << 10) | \
         (ieot << 9) | (ieob << 8) | ch)
#define NOOP_TRE (0x0 << 20 | 0x1 << 16)
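/*
 * Added note (illustrative only, not from the original source): NOOP_TRE_MASK()
 * simply packs its arguments into a TRE control dword at the bit positions
 * shown above (link_rx at bit 11, bei at bit 10, ieot at bit 9, ieob at bit 8,
 * and ch in the low bits). For example, a hypothetical
 * NOOP_TRE_MASK(0, 0, 0, 1, 0) yields a control word with only the IEOB bit set.
 */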
#define HID_CMD_TIMEOUT_MS (250)

struct __packed gpi_error_log_entry {
        u32 routine : 4;
        u32 type : 4;
        u32 reserved0 : 4;
        u32 code : 4;
        u32 reserved1 : 3;
        u32 chid : 5;
        u32 reserved2 : 1;
        u32 chtype : 1;
        u32 ee : 1;
};

struct __packed xfer_compl_event {
        u64 ptr;
        u32 length : 24;
        u8 code;
        u16 status;
        u8 type;
        u8 chid;
};

struct __packed qup_q2spi_status {
        u32 ptr_l;
        u32 ptr_h : 8;
        u32 resvd_0 : 8;
        u32 value : 8;
        u32 resvd_1 : 8;
        u32 length : 20;
        u32 resvd_2 : 4;
        u8 code : 8;
        u16 status : 16;
        u8 type : 8;
        u8 ch_id : 8;
};

struct __packed immediate_data_event {
        u8 data_bytes[8];
        u8 length : 4;
        u8 resvd : 4;
        u16 tre_index;
        u8 code;
        u16 status;
        u8 type;
        u8 chid;
};

struct __packed qup_notif_event {
        u32 status;
        u32 time;
        u32 count : 24;
        u8 resvd;
        u16 resvd1;
        u8 type;
        u8 chid;
};

struct __packed gpi_ere {
        u32 dword[4];
};

union __packed gpi_event {
        struct __packed xfer_compl_event xfer_compl_event;
        struct __packed immediate_data_event immediate_data_event;
        struct __packed qup_notif_event qup_notif_event;
        struct __packed gpi_ere gpi_ere;
        struct __packed qup_q2spi_status q2spi_status;
        struct __packed qup_q2spi_cr_header_event q2spi_cr_header_event;
};

enum gpii_irq_settings {
        DEFAULT_IRQ_SETTINGS,
        MASK_IEOB_SETTINGS,
};

enum gpi_ev_state {
        DEFAULT_EV_CH_STATE = 0,
        EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
        EV_STATE_ALLOCATED,
        MAX_EV_STATES
};

static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
        [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
        [EV_STATE_ALLOCATED] = "ALLOCATED",
};

#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
                                    "INVALID" : gpi_ev_state_str[state])

enum gpi_ch_state {
        DEFAULT_CH_STATE = 0x0,
        CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
        CH_STATE_ALLOCATED = 0x1,
        CH_STATE_STARTED = 0x2,
        CH_STATE_STOPPED = 0x3,
        CH_STATE_STOP_IN_PROC = 0x4,
        CH_STATE_ENABLE_HID = 0x5,
        CH_STATE_DISABLE_HID = 0x6,
        CH_STATE_ERROR = 0xf,
        MAX_CH_STATES
};

static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
        [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
        [CH_STATE_ALLOCATED] = "ALLOCATED",
        [CH_STATE_STARTED] = "STARTED",
        [CH_STATE_STOPPED] = "STOPPED",
        [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
        [CH_STATE_ENABLE_HID] = "HID ENABLE",
        [CH_STATE_DISABLE_HID] = "HID DISABLE",
        [CH_STATE_ERROR] = "ERROR",
};

#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
                                    "INVALID" : gpi_ch_state_str[state])

enum gpi_cmd {
        GPI_CH_CMD_BEGIN,
        GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
        GPI_CH_CMD_START,
        GPI_CH_CMD_STOP,
        GPI_CH_CMD_RESET,
        GPI_CH_CMD_DE_ALLOC,
        GPI_CH_CMD_UART_SW_STALE,
        GPI_CH_CMD_UART_RFR_READY,
        GPI_CH_CMD_UART_RFR_NOT_READY,
        GPI_CH_CMD_ENABLE_HID,
        GPI_CH_CMD_DISABLE_HID,
        GPI_CH_CMD_END = GPI_CH_CMD_DISABLE_HID,
        GPI_EV_CMD_BEGIN,
        GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
        GPI_EV_CMD_RESET,
        GPI_EV_CMD_DEALLOC,
        GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
        GPI_MAX_CMD,
};

#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)

static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
        [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
        [GPI_CH_CMD_START] = "CH START",
        [GPI_CH_CMD_STOP] = "CH STOP",
        [GPI_CH_CMD_RESET] = "CH_RESET",
        [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
        [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
        [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
        [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
        [GPI_CH_CMD_ENABLE_HID] = "CH Enable HID interrupt",
        [GPI_CH_CMD_DISABLE_HID] = "CH Disable HID interrupt",
        [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
        [GPI_EV_CMD_RESET] = "EV RESET",
        [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
};

#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
                             gpi_cmd_str[cmd])

static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
        [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
        [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
        [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
        [MSM_GPI_QUP_FW_ERROR] = "UNHANDLED ERROR",
        [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
        [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
        [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
        [MSM_GPI_QUP_CR_HEADER] = "Doorbell CR EVENT"
};

#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
                                    "INVALID" : gpi_cb_event_str[event])

enum se_protocol {
        SE_PROTOCOL_SPI = 1,
        SE_PROTOCOL_UART = 2,
        SE_PROTOCOL_I2C = 3,
        SE_PROTOCOL_Q2SPI = 0xE,
        SE_MAX_PROTOCOL
};
/*
 * @DISABLE_STATE: no register access allowed
 * @CONFIG_STATE: client has configured the channel
 * @PREPARE_HARDWARE: register access is allowed,
 *	however, events are not processed
 * @ACTIVE_STATE: channels are fully operational
 * @PREPARE_TERMINATE: graceful termination of channels,
 *	register access is allowed
 * @PAUSE_STATE: channels are active, but not processing any events
 */
enum gpi_pm_state {
        DISABLE_STATE,
        CONFIG_STATE,
        PREPARE_HARDWARE,
        ACTIVE_STATE,
        PREPARE_TERMINATE,
        PAUSE_STATE,
        MAX_PM_STATE
};

#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)

static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
        [DISABLE_STATE] = "DISABLE",
        [CONFIG_STATE] = "CONFIG",
        [PREPARE_HARDWARE] = "PREPARE HARDWARE",
        [ACTIVE_STATE] = "ACTIVE",
        [PREPARE_TERMINATE] = "PREPARE TERMINATE",
        [PAUSE_STATE] = "PAUSE",
};

#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
                              "INVALID" : gpi_pm_state_str[state])
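/*
 * Added sketch (illustrative only, not from the original source): callers are
 * expected to gate MMIO access on the power state via REG_ACCESS_VALID(),
 * e.g.:
 *
 *	if (!REG_ACCESS_VALID(gpii->pm_state))
 *		return -EACCES;		// hypothetical error choice
 *
 * Any state at or beyond PREPARE_HARDWARE permits register access.
 */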
static const struct {
        enum gpi_cmd gpi_cmd;
        u32 opcode;
        u32 state;
        u32 timeout_ms;
} gpi_cmd_info[GPI_MAX_CMD] = {
        {
                GPI_CH_CMD_ALLOCATE,
                GPI_GPII_n_CH_CMD_ALLOCATE,
                CH_STATE_ALLOCATED,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_START,
                GPI_GPII_n_CH_CMD_START,
                CH_STATE_STARTED,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_STOP,
                GPI_GPII_n_CH_CMD_STOP,
                CH_STATE_STOPPED,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_RESET,
                GPI_GPII_n_CH_CMD_RESET,
                CH_STATE_ALLOCATED,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_DE_ALLOC,
                GPI_GPII_n_CH_CMD_DE_ALLOC,
                CH_STATE_NOT_ALLOCATED,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_UART_SW_STALE,
                GPI_GPII_n_CH_CMD_UART_SW_STALE,
                STATE_IGNORE,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_UART_RFR_READY,
                GPI_GPII_n_CH_CMD_UART_RFR_READY,
                STATE_IGNORE,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_UART_RFR_NOT_READY,
                GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
                STATE_IGNORE,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_ENABLE_HID,
                GPI_GPII_n_CH_CMD_ENABLE_HID,
                CH_STATE_ENABLE_HID,
                HID_CMD_TIMEOUT_MS,
        },
        {
                GPI_CH_CMD_DISABLE_HID,
                GPI_GPII_n_CH_CMD_DISABLE_HID,
                CH_STATE_DISABLE_HID,
                HID_CMD_TIMEOUT_MS,
        },
        {
                GPI_EV_CMD_ALLOCATE,
                GPI_GPII_n_EV_CH_CMD_ALLOCATE,
                EV_STATE_ALLOCATED,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_EV_CMD_RESET,
                GPI_GPII_n_EV_CH_CMD_RESET,
                EV_STATE_ALLOCATED,
                CMD_TIMEOUT_MS,
        },
        {
                GPI_EV_CMD_DEALLOC,
                GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
                EV_STATE_NOT_ALLOCATED,
                CMD_TIMEOUT_MS,
        },
};

struct gpi_ring {
        void *pre_aligned;
        size_t alloc_size;
        phys_addr_t phys_addr;
        dma_addr_t dma_handle;
        void *base;
        void *wp;
        void *rp;
        u32 len;
        u32 el_size;
        u32 elements;
        bool configured;
};

struct sg_tre {
        void *ptr;
        void *wp; /* store chan wp for debugging */
};

struct gpi_dbg_log {
        void *addr;
        u64 time;
        u32 val;
        bool read;
};

struct gpi_dev {
        struct dma_device dma_device;
        struct device *dev;
        struct resource *res;
        void __iomem *regs;
        void *ee_base; /* ee register base address */
        u32 max_gpii; /* maximum # of gpii instances available per gpi block */
        u32 gpii_mask; /* gpii instances available for apps */
        u32 static_gpii_mask; /* gpii instances assigned statically */
        u32 ev_factor; /* ev ring length factor */
        u32 smmu_cfg;
        dma_addr_t iova_base;
        size_t iova_size;
        struct gpii *gpiis;
        void *ilctxt;
        u32 ipc_log_lvl;
        u32 klog_lvl;
        struct dentry *dentry;
        bool is_le_vm;
};

static struct gpi_dev *gpi_dev_dbg[5];
static int arr_idx;

struct reg_info {
        char *name;
        u32 offset;
        u32 val;
};

static const struct reg_info gpi_debug_ev_cntxt[] = {
        { "CONFIG", CNTXT_0_CONFIG },
        { "R_LENGTH", CNTXT_1_R_LENGTH },
        { "BASE_LSB", CNTXT_2_RING_BASE_LSB },
        { "BASE_MSB", CNTXT_3_RING_BASE_MSB },
        { "RP_LSB", CNTXT_4_RING_RP_LSB },
        { "RP_MSB", CNTXT_5_RING_RP_MSB },
        { "WP_LSB", CNTXT_6_RING_WP_LSB },
        { "WP_MSB", CNTXT_7_RING_WP_MSB },
        { "INT_MOD", CNTXT_8_RING_INT_MOD },
        { "INTVEC", CNTXT_9_RING_INTVEC },
        { "MSI_LSB", CNTXT_10_RING_MSI_LSB },
        { "MSI_MSB", CNTXT_11_RING_MSI_MSB },
        { "RP_UPDATE_LSB", CNTXT_12_RING_RP_UPDATE_LSB },
        { "RP_UPDATE_MSB", CNTXT_13_RING_RP_UPDATE_MSB },
        { NULL },
};

static const struct reg_info gpi_debug_ch_cntxt[] = {
        { "CONFIG", CNTXT_0_CONFIG },
        { "R_LENGTH", CNTXT_1_R_LENGTH },
        { "BASE_LSB", CNTXT_2_RING_BASE_LSB },
        { "BASE_MSB", CNTXT_3_RING_BASE_MSB },
        { "RP_LSB", CNTXT_4_RING_RP_LSB },
        { "RP_MSB", CNTXT_5_RING_RP_MSB },
        { "WP_LSB", CNTXT_6_RING_WP_LSB },
        { "WP_MSB", CNTXT_7_RING_WP_MSB },
        { NULL },
};

static const struct reg_info gpi_debug_regs[] = {
        { "DEBUG_PC", GPI_DEBUG_PC_FOR_DEBUG },
        { "SW_RF_10", GPI_DEBUG_SW_RF_n_READ(10) },
        { "SW_RF_11", GPI_DEBUG_SW_RF_n_READ(11) },
        { "SW_RF_12", GPI_DEBUG_SW_RF_n_READ(12) },
        { "SW_RF_21", GPI_DEBUG_SW_RF_n_READ(21) },
        { NULL },
};

static const struct reg_info gpi_debug_qsb_regs[] = {
        { "QSB_LOG_SEL", GPI_DEBUG_QSB_LOG_SEL },
        { "QSB_LOG_CLR", GPI_DEBUG_QSB_LOG_CLR },
        { "QSB_LOG_ERR_TRNS_ID", GPI_DEBUG_QSB_LOG_ERR_TRNS_ID },
        { "QSB_LOG_0", GPI_DEBUG_QSB_LOG_0 },
        { "QSB_LOG_1", GPI_DEBUG_QSB_LOG_1 },
        { "QSB_LOG_2", GPI_DEBUG_QSB_LOG_2 },
        { "LAST_MISC_ID_0", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(0) },
        { "LAST_MISC_ID_1", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(1) },
        { "LAST_MISC_ID_2", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(2) },
        { "LAST_MISC_ID_3", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(3) },
        { NULL },
};

struct gpi_reg_table {
        u64 timestamp;
        struct reg_info *ev_cntxt_info;
        struct reg_info *chan[MAX_CHANNELS_PER_GPII];
        struct reg_info *gpi_debug_regs;
        struct reg_info *gpii_cntxt;
        struct reg_info *gpi_debug_qsb_regs;
        u32 ev_scratch_0;
        u32 ch_scratch_0[MAX_CHANNELS_PER_GPII];
        void *ev_ring;
        u32 ev_ring_len;
        void *ch_ring[MAX_CHANNELS_PER_GPII];
        u32 ch_ring_len[MAX_CHANNELS_PER_GPII];
        u32 error_log;
};

struct gpii_chan {
        struct virt_dma_chan vc;
        u32 chid;
        u32 seid;
        u8 init_config:1;
        enum se_protocol protocol;
        enum EV_PRIORITY priority; /* comes from clients DT node */
        struct gpii *gpii;
        enum gpi_ch_state ch_state;
        enum gpi_pm_state pm_state;
        void __iomem *ch_cntxt_base_reg;
        void __iomem *ch_cntxt_db_reg;
        void __iomem *ch_ring_base_lsb_reg,
                *ch_ring_rp_lsb_reg,
                *ch_ring_wp_lsb_reg;
        void __iomem *ch_cmd_reg;
        u32 req_tres; /* # of tre's client requested */
        u32 dir;
        struct gpi_ring *ch_ring;
        dma_addr_t gpii_chan_dma;
        struct gpi_client_info client_info;
        u32 lock_tre_set;
        u32 num_tre;
};

struct gpii {
        u32 gpii_id;
        struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
        struct gpi_dev *gpi_dev;
        enum EV_PRIORITY ev_priority;
        enum se_protocol protocol;
        int irq;
        void __iomem *regs; /* points to gpi top */
        void __iomem *ev_cntxt_base_reg;
        void __iomem *ev_cntxt_db_reg;
        void __iomem *ev_ring_base_lsb_reg,
                *ev_ring_rp_lsb_reg,
                *ev_ring_wp_lsb_reg;
        void __iomem *ev_cmd_reg;
        void __iomem *ieob_src_reg;
        void __iomem *ieob_clr_reg;
        struct mutex ctrl_lock;
        enum gpi_ev_state ev_state;
        bool configured_irq;
        enum gpi_pm_state pm_state;
        rwlock_t pm_lock;
        struct gpi_ring *ev_ring;
        dma_addr_t event_dma_addr;
        struct tasklet_struct ev_task; /* event processing tasklet */
        struct completion cmd_completion;
        enum gpi_cmd gpi_cmd;
        u32 cntxt_type_irq_msk;
        void *ilctxt;
        u32 ipc_log_lvl;
        u32 klog_lvl;
        struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
        atomic_t dbg_index;
        char label[GPI_LABEL_SIZE];
        struct dentry *dentry;
        struct gpi_reg_table dbg_reg_table;
        bool reg_table_dump;
        u32 dbg_gpi_irq_cnt;
        bool unlock_tre_set;
        bool dual_ee_sync_flag;
        bool is_resumed;
};

struct gpi_desc {
        struct virt_dma_desc vd;
        void *wp; /* points to TRE last queued during issue_pending */
        void *db; /* DB register to program */
        struct gpii_chan *gpii_chan;
};

#define GPI_SMMU_ATTACH BIT(0)
#define GPI_SMMU_S1_BYPASS BIT(1)
#define GPI_SMMU_FAST BIT(2)
#define GPI_SMMU_ATOMIC BIT(3)

const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
        GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
};

struct dentry *pdentry;

static irqreturn_t gpi_handle_irq(int irq, void *data);
static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
static void gpi_process_events(struct gpii *gpii);
static int gpi_start_chan(struct gpii_chan *gpii_chan);
static void gpi_free_chan_desc(struct gpii_chan *gpii_chan);
static int gpi_deep_sleep_exit_config(struct dma_chan *chan,
                                      struct dma_slave_config *config);

static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
{
        return container_of(dma_chan, struct gpii_chan, vc.chan);
}

static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct gpi_desc, vd);
}

static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
                                      void *addr)
{
        return ring->phys_addr + (addr - ring->base);
}

static inline void *to_virtual(const struct gpi_ring *const ring,
                               phys_addr_t addr)
{
        return ring->base + (addr - ring->phys_addr);
}
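/*
 * Added note (assumption, not from the original source): the ring pointers
 * programmed into hardware are physical addresses, while the driver walks the
 * ring through kernel virtual pointers; these two helpers convert between the
 * two views of the same ring. A hypothetical, simplified use would be:
 *
 *	void *rp = to_virtual(ring, rp_phys);	// rp_phys read from the RP
 *						// context registers
 *
 * which turns a hardware read pointer back into a pointer inside ring->base.
 */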
#if IS_ENABLED(CONFIG_MSM_GPI_DMA_DEBUG)
static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
{
        u64 time = sched_clock();
        unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
        u32 val;

        val = readl_relaxed(addr);
        index &= (GPI_DBG_LOG_SIZE - 1);
        (gpii->dbg_log + index)->addr = addr;
        (gpii->dbg_log + index)->time = time;
        (gpii->dbg_log + index)->val = val;
        (gpii->dbg_log + index)->read = true;
        GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
                 addr - gpii->regs, val);
        return val;
}

static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
{
        u64 time = sched_clock();
        unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;

        index &= (GPI_DBG_LOG_SIZE - 1);
        (gpii->dbg_log + index)->addr = addr;
        (gpii->dbg_log + index)->time = time;
        (gpii->dbg_log + index)->val = val;
        (gpii->dbg_log + index)->read = false;
        GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
                 addr - gpii->regs, val);
        writel_relaxed(val, addr);
}
#else
static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
{
        u32 val = readl_relaxed(addr);

        GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
                 addr - gpii->regs, val);
        return val;
}

static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
{
        GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
                 addr - gpii->regs, val);
        writel_relaxed(val, addr);
}
#endif

/* gpi_write_reg_field - write to specific bit field */
static inline void
gpi_write_reg_field(struct gpii *gpii, void __iomem *addr, u32 mask, u32 shift, u32 val)
{
        u32 tmp = gpi_read_reg(gpii, addr);

        tmp &= ~mask;
        val = tmp | ((val << shift) & mask);
        gpi_write_reg(gpii, addr, val);
}
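/*
 * Added example (illustrative only): the masked field update above reads the
 * register, clears the field and writes back the shifted value, so something
 * like
 *
 *	gpi_write_reg_field(gpii, gpii->regs + offset, mask, shift, 1);
 *
 * only touches the bits covered by 'mask' and leaves the rest of the register
 * unchanged.
 */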
static void gpi_dump_cntxt_regs(struct gpii *gpii)
{
        int chan;
        u32 reg_val;
        u32 offset;

        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_CH_k_CNTXT_0_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_CH_%d_CNTXT_0 reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_CH_k_CNTXT_2_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_CH_%d_CNTXT_2 reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_CH_k_CNTXT_4_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_CH_%d_CNTXT_4 reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_CH_k_CNTXT_6_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_CH_%d_CNTXT_6 reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_EV_%d_CNTXT_0 reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_EV_CH_k_CNTXT_2_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_EV_%d_CNTXT_2 reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_EV_CH_k_CNTXT_4_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_EV_%d_CNTXT_4 reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_EV_CH_k_CNTXT_6_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_EV_%d_CNTXT_6 reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                offset = GPI_GPII_n_CH_k_RE_FETCH_READ_PTR(gpii->gpii_id,
                                                           gpii->gpii_chan[chan].chid);
                reg_val = readl_relaxed(gpii->regs + offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_GPII_%d_CH_%d_RE_FETCH_READ_PTR reg_val:0x%x\n",
                         gpii->gpii_id, chan, reg_val);
        }
}
static void gpi_dump_debug_reg(struct gpii *gpii)
{
        struct gpi_reg_table *dbg_reg_table = &gpii->dbg_reg_table;
        struct reg_info *reg_info;
        int chan;
        const gfp_t gfp = GFP_ATOMIC;
        const struct reg_info gpii_cntxt[] = {
                { "TYPE_IRQ", GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS
                                (gpii->gpii_id) },
                { "TYPE_IRQ_MSK", GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
                                (gpii->gpii_id) },
                { "CH_IRQ", GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS
                                (gpii->gpii_id) },
                { "EV_IRQ", GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS
                                (gpii->gpii_id) },
                { "CH_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
                                (gpii->gpii_id) },
                { "EV_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
                                (gpii->gpii_id) },
                { "IEOB_IRQ", GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS
                                (gpii->gpii_id) },
                { "IEOB_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
                                (gpii->gpii_id) },
                { "GLOB_IRQ", GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS
                                (gpii->gpii_id) },
                { NULL },
        };

        gpi_dump_cntxt_regs(gpii);

        dbg_reg_table->timestamp = sched_clock();
        if (!dbg_reg_table->gpii_cntxt) {
                dbg_reg_table->gpii_cntxt = kzalloc(sizeof(gpii_cntxt), gfp);
                if (!dbg_reg_table->gpii_cntxt)
                        return;
                memcpy((void *)dbg_reg_table->gpii_cntxt, (void *)gpii_cntxt,
                       sizeof(gpii_cntxt));
        }

        /* log gpii cntxt */
        reg_info = dbg_reg_table->gpii_cntxt;
        for (; reg_info->name; reg_info++) {
                reg_info->val = readl_relaxed(gpii->regs + reg_info->offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_cntxt Reg:%s addr:0x%x->val:0x%x\n",
                         reg_info->name, reg_info->offset, reg_info->val);
        }

        if (!dbg_reg_table->ev_cntxt_info) {
                dbg_reg_table->ev_cntxt_info =
                        kzalloc(sizeof(gpi_debug_ev_cntxt), gfp);
                if (!dbg_reg_table->ev_cntxt_info)
                        return;
                memcpy((void *)dbg_reg_table->ev_cntxt_info,
                       (void *)gpi_debug_ev_cntxt, sizeof(gpi_debug_ev_cntxt));
        }

        /* log ev cntxt */
        reg_info = dbg_reg_table->ev_cntxt_info;
        for (; reg_info->name; reg_info++) {
                reg_info->val = readl_relaxed(gpii->ev_cntxt_base_reg +
                                              reg_info->offset);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_ev_cntxt Reg:%s addr:0x%x->val:0x%x\n",
                         reg_info->name, reg_info->offset, reg_info->val);
        }

        /* dump channel cntxt registers */
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                if (!dbg_reg_table->chan[chan]) {
                        dbg_reg_table->chan[chan] =
                                kzalloc(sizeof(gpi_debug_ch_cntxt), gfp);
                        if (!dbg_reg_table->chan[chan])
                                return;
                        memcpy((void *)dbg_reg_table->chan[chan],
                               (void *)gpi_debug_ch_cntxt,
                               sizeof(gpi_debug_ch_cntxt));
                }
                reg_info = dbg_reg_table->chan[chan];
                for (; reg_info->name; reg_info++) {
                        reg_info->val =
                                readl_relaxed(
                                        gpii->gpii_chan[chan].ch_cntxt_base_reg +
                                        reg_info->offset);
                        GPII_ERR(gpii, GPI_DBG_COMMON,
                                 "GPI_ch%d Reg:%s addr:0x%x->val:0x%x\n",
                                 chan, reg_info->name, reg_info->offset, reg_info->val);
                }
        }

        /* Skip dumping gpi debug and qsb registers for levm */
        if (!gpii->gpi_dev->is_le_vm) {
                if (!dbg_reg_table->gpi_debug_regs) {
                        dbg_reg_table->gpi_debug_regs =
                                kzalloc(sizeof(gpi_debug_regs), gfp);
                        if (!dbg_reg_table->gpi_debug_regs)
                                return;
                        memcpy((void *)dbg_reg_table->gpi_debug_regs,
                               (void *)gpi_debug_regs, sizeof(gpi_debug_regs));
                }

                /* log debug register */
                reg_info = dbg_reg_table->gpi_debug_regs;
                for (; reg_info->name; reg_info++) {
                        reg_info->val = readl_relaxed(gpii->gpi_dev->regs + reg_info->offset);
                        GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_dbg Reg:%s addr:0x%x->val:0x%x\n",
                                 reg_info->name, reg_info->offset, reg_info->val);
                }

                if (!dbg_reg_table->gpi_debug_qsb_regs) {
                        dbg_reg_table->gpi_debug_qsb_regs =
                                kzalloc(sizeof(gpi_debug_qsb_regs), gfp);
                        if (!dbg_reg_table->gpi_debug_qsb_regs)
                                return;
                        memcpy((void *)dbg_reg_table->gpi_debug_qsb_regs,
                               (void *)gpi_debug_qsb_regs,
                               sizeof(gpi_debug_qsb_regs));
                }

                /* log QSB register */
                reg_info = dbg_reg_table->gpi_debug_qsb_regs;
                for (; reg_info->name; reg_info++)
                        reg_info->val = readl_relaxed(gpii->gpi_dev->regs + reg_info->offset);
        }

        /* dump scratch registers */
        dbg_reg_table->ev_scratch_0 = readl_relaxed(gpii->regs +
                        GPI_GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id));
        GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_ev_scratch Reg addr:0x%x->val:0x%x\n",
                 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id),
                 dbg_reg_table->ev_scratch_0);
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                dbg_reg_table->ch_scratch_0[chan] = readl_relaxed(gpii->regs +
                                GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
                                                               gpii->gpii_chan[chan].chid));
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI_ch_scratch Reg addr:0x%x->val:0x%x\n",
                         GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id, gpii->gpii_chan[chan].chid),
                         dbg_reg_table->ch_scratch_0[chan]);
        }

        /* Copy the ev ring */
        if (!dbg_reg_table->ev_ring) {
                dbg_reg_table->ev_ring_len = gpii->ev_ring->len;
                dbg_reg_table->ev_ring =
                        kzalloc(dbg_reg_table->ev_ring_len, gfp);
                if (!dbg_reg_table->ev_ring)
                        return;
        }
        memcpy(dbg_reg_table->ev_ring, gpii->ev_ring->base,
               dbg_reg_table->ev_ring_len);

        /* Copy Transfer rings */
        for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
                struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];

                if (!dbg_reg_table->ch_ring[chan]) {
                        dbg_reg_table->ch_ring_len[chan] =
                                gpii_chan->ch_ring->len;
                        dbg_reg_table->ch_ring[chan] =
                                kzalloc(dbg_reg_table->ch_ring_len[chan], gfp);
                        if (!dbg_reg_table->ch_ring[chan])
                                return;
                }
                memcpy(dbg_reg_table->ch_ring[chan], gpii_chan->ch_ring->base,
                       dbg_reg_table->ch_ring_len[chan]);
                GPII_ERR(gpii, GPI_DBG_COMMON, "GPI Error log chan:%d base:%p\n",
                         chan, gpii_chan->ch_ring->base);
        }

        dbg_reg_table->error_log = readl_relaxed(gpii->regs +
                        GPI_GPII_n_ERROR_LOG_OFFS(gpii->gpii_id));
        GPII_ERR(gpii, GPI_DBG_COMMON, "GPI Error log Reg addr:0x%x->val:0x%x\n",
                 GPI_GPII_n_ERROR_LOG_OFFS(gpii->gpii_id), dbg_reg_table->error_log);
        GPII_ERR(gpii, GPI_DBG_COMMON, "Global IRQ handling Exit\n");
}

void gpi_dump_for_geni(struct dma_chan *chan)
{
        struct gpii_chan *gpii_chan = to_gpii_chan(chan);
        struct gpii *gpii = gpii_chan->gpii;

        gpi_dump_debug_reg(gpii);
}
EXPORT_SYMBOL(gpi_dump_for_geni);

static void gpi_disable_interrupts(struct gpii *gpii)
{
        struct {
                u32 offset;
                u32 mask;
                u32 shift;
                u32 val;
        } default_reg[] = {
                {
                        GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
                        GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
                        0,
                },
                {
                        GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
                        GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
                        0,
                },
                {
                        GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
                        GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
                        0,
                },
                {
                        GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
                        GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
                        0,
                },
                {
                        GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
                        GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
                        0,
                },
                {
                        GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
                        GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
                        0,
                },
                {
                        GPI_GPII_n_CNTXT_INTSET_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_INTSET_BMSK,
                        GPI_GPII_n_CNTXT_INTSET_SHFT,
                        0,
                },
                { 0 },
        };
        int i;

        for (i = 0; default_reg[i].offset; i++)
                gpi_write_reg_field(gpii, gpii->regs +
                                    default_reg[i].offset,
                                    default_reg[i].mask,
                                    default_reg[i].shift,
                                    default_reg[i].val);
        gpii->cntxt_type_irq_msk = 0;
        free_irq(gpii->irq, gpii);
        gpii->configured_irq = false;
}

/* configure and enable interrupts */
static int gpi_config_interrupts(struct gpii *gpii,
                                 enum gpii_irq_settings settings,
                                 bool mask)
{
        int ret;
        int i;
        const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
                              GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
                              GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
                              GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
                              GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
        struct {
                u32 offset;
                u32 mask;
                u32 shift;
                u32 val;
        } default_reg[] = {
                {
                        GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
                        GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
                        def_type,
                },
                {
                        GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
                        GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
                        GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
                },
                {
                        GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
                        GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
                        GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
                },
                {
                        GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
                        GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
                        GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
                },
                {
                        GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
                        GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
                        GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
                },
                {
                        GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
                        GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
                        GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
                },
                {
                        GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
                                (gpii->gpii_id),
                        U32_MAX,
                        0,
                        0x0,
                },
                {
                        GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
                                (gpii->gpii_id),
                        U32_MAX,
                        0,
                        0x0,
                },
                {
                        GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
                                (gpii->gpii_id),
                        U32_MAX,
                        0,
                        0x0,
                },
                {
                        GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
                                (gpii->gpii_id),
                        U32_MAX,
                        0,
                        0x0,
                },
                {
                        GPI_GPII_n_CNTXT_INTSET_OFFS
                                (gpii->gpii_id),
                        GPI_GPII_n_CNTXT_INTSET_BMSK,
                        GPI_GPII_n_CNTXT_INTSET_SHFT,
                        0x01,
                },
                {
                        GPI_GPII_n_ERROR_LOG_OFFS
                                (gpii->gpii_id),
                        U32_MAX,
                        0,
                        0x00,
                },
                { 0 },
        };

        GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
                  (gpii->configured_irq) ? 'F' : 'T',
                  (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
                  (mask) ? 'T' : 'F');

        if (!gpii->configured_irq) {
                ret = request_irq(gpii->irq, gpi_handle_irq, IRQF_TRIGGER_HIGH,
                                  gpii->label, gpii);
                if (ret < 0) {
                        GPII_CRITIC(gpii, GPI_DBG_COMMON,
                                    "error request irq:%d ret:%d\n",
                                    gpii->irq, ret);
                        return ret;
                }
        }

        if (settings == MASK_IEOB_SETTINGS) {
                /*
                 * GPII only uses one EV ring per gpii so we can globally
                 * enable/disable IEOB interrupt
                 */
                if (mask)
                        gpii->cntxt_type_irq_msk |=
                                GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
                else
                        gpii->cntxt_type_irq_msk &=
                                ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
                gpi_write_reg_field(gpii, gpii->regs +
                                    GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
                                    GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
                                    GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
                                    gpii->cntxt_type_irq_msk);
        } else {
                for (i = 0; default_reg[i].offset; i++)
                        gpi_write_reg_field(gpii, gpii->regs +
                                            default_reg[i].offset,
                                            default_reg[i].mask,
                                            default_reg[i].shift,
                                            default_reg[i].val);
                gpii->cntxt_type_irq_msk = def_type;
        }

        gpii->configured_irq = true;
        return 0;
}
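/*
 * Added usage note (assumption based only on the code above): passing
 * DEFAULT_IRQ_SETTINGS programs the full default_reg[] table and requests the
 * GPII IRQ on first use, while MASK_IEOB_SETTINGS only updates the cached
 * IEOB bit of cntxt_type_irq_msk, e.g. a hypothetical
 *
 *	gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, false);
 *
 * clears the IEOB bit in the TYPE_IRQ_MSK register for the single event ring.
 */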
/**
 * gsi_se_common_iommu_unmap_buf() - Unmap a single buffer from QUPv3 context bank
 * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
 * @iova: Pointer in which the mapped virtual address is stored.
 * @size: Size of the buffer.
 * @dir: Direction of the DMA transfer.
 *
 * This function is used to unmap an already mapped buffer from the
 * QUPv3 context bank device space.
 *
 * Return: None.
 */
static void gsi_se_common_iommu_unmap_buf(struct device *wrapper_dev, dma_addr_t *iova,
                                          size_t size, enum dma_data_direction dir)
{
        if (!dma_mapping_error(wrapper_dev, *iova))
                dma_unmap_single(wrapper_dev, *iova, size, dir);
}
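/*
 * Added note (descriptive, for clarity): the dma_mapping_error() check above
 * makes this helper safe to call on a buffer whose mapping attempt failed;
 * only successfully mapped buffers are passed to dma_unmap_single().
 */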
/**
 * gsi_common_tre_process() - Process received TREs from the GSI HW
 * @gsi: Base address of the gsi common structure.
 * @num_xfers: Total number of messages.
 * @num_msg_per_irq: Number of messages per IRQ.
 * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
 *
 * This function is used to process TREs received from the GSI HW.
 * In the error case it also clears and unmaps all pending transfers.
 *
 * Return: None.
 */
void gsi_common_tre_process(struct gsi_common *gsi, u32 num_xfers, u32 num_msg_per_irq,
                            struct device *wrapper_dev)
{
        u32 msg_xfer_cnt;
        int wr_idx = 0;
        struct gsi_tre_queue *tx_tre_q = &gsi->tx.tre_queue;
        /*
         * In the error case we need to unmap all messages; in the regular
         * working case only processed messages are unmapped.
         */
  1161. if (*gsi->protocol_err)
  1162. msg_xfer_cnt = tx_tre_q->msg_cnt;
  1163. else
  1164. msg_xfer_cnt = atomic_read(&tx_tre_q->irq_cnt) * num_msg_per_irq;
  1165. for (; tx_tre_q->unmap_msg_cnt < msg_xfer_cnt; tx_tre_q->unmap_msg_cnt++) {
  1166. if (tx_tre_q->unmap_msg_cnt == num_xfers) {
  1167. GSI_SE_DBG(gsi->ipc, false, gsi->dev,
  1168. "%s:last %d msg unmapped msg_cnt:%d\n",
  1169. __func__, num_xfers, msg_xfer_cnt);
  1170. break;
  1171. }
  1172. tx_tre_q->freed_msg_cnt++;
  1173. wr_idx = tx_tre_q->unmap_msg_cnt % GSI_MAX_NUM_TRE_MSGS;
  1174. if (tx_tre_q->len[wr_idx % GSI_MAX_NUM_TRE_MSGS] > GSI_MAX_IMMEDIATE_DMA_LEN) {
  1175. gsi_se_common_iommu_unmap_buf(wrapper_dev,
  1176. &tx_tre_q->dma_buf[wr_idx % GSI_MAX_NUM_TRE_MSGS],
  1177. tx_tre_q->len[wr_idx % GSI_MAX_NUM_TRE_MSGS],
  1178. DMA_TO_DEVICE);
  1179. }
  1180. GSI_SE_DBG(gsi->ipc, false, gsi->dev,
  1181. "%s:unmap_msg_cnt %d freed_cnt:%d wr_idx:%d len:%d\n",
  1182. __func__, tx_tre_q->unmap_msg_cnt, tx_tre_q->freed_msg_cnt,
  1183. wr_idx, tx_tre_q->len[wr_idx % GSI_MAX_NUM_TRE_MSGS]);
  1184. }
  1185. }
  1186. EXPORT_SYMBOL_GPL(gsi_common_tre_process);
  1187. /**
1188. * gsi_common_tx_tre_optimization() - Optimize TX TRE queueing and processing
1189. * @gsi: Base address of the gsi common structure.
1190. * @num_xfers: Total number of messages in the transfer.
1191. * @num_msg_per_irq: Number of messages completed per interrupt.
1192. * @xfer_timeout: Transfer wait timeout (passed to wait_for_completion_timeout()).
1193. * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
1194. *
1195. * This function pipelines DMA TREs to keep the HW busy while completed TREs are processed.
1196. *
1197. * Return: Non-zero on success, 0 if the transfer timed out
  1198. */
  1199. int gsi_common_tx_tre_optimization(struct gsi_common *gsi, u32 num_xfers, u32 num_msg_per_irq,
  1200. u32 xfer_timeout, struct device *wrapper_dev)
  1201. {
  1202. int timeout = 1, i;
  1203. int max_irq_cnt;
  1204. max_irq_cnt = num_xfers / num_msg_per_irq;
  1205. if (num_xfers % num_msg_per_irq)
  1206. max_irq_cnt++;
  1207. for (i = 0; i < max_irq_cnt; i++) {
  1208. if (max_irq_cnt != atomic_read(&gsi->tx.tre_queue.irq_cnt)) {
  1209. GSI_SE_DBG(gsi->ipc, false, gsi->dev,
  1210. "%s: calling wait for_completion ix:%d irq_cnt:%d\n",
  1211. __func__, i, atomic_read(&gsi->tx.tre_queue.irq_cnt));
  1212. timeout = wait_for_completion_timeout(gsi->xfer,
  1213. xfer_timeout);
  1214. reinit_completion(gsi->xfer);
  1215. if (!timeout) {
  1216. GSI_SE_DBG(gsi->ipc, false, gsi->dev,
  1217. "%s: msg xfer timeout\n", __func__);
  1218. return timeout;
  1219. }
  1220. }
  1221. GSI_SE_DBG(gsi->ipc, false, gsi->dev,
  1222. "%s: maxirq_cnt:%d i:%d\n", __func__, max_irq_cnt, i);
  1223. gsi_common_tre_process(gsi, num_xfers, num_msg_per_irq, wrapper_dev);
  1224. if (num_xfers > gsi->tx.tre_queue.msg_cnt)
  1225. return timeout;
  1226. }
  1227. /* process received tre's */
  1228. if (timeout)
  1229. gsi_common_tre_process(gsi, num_xfers, num_msg_per_irq, wrapper_dev);
  1230. GSI_SE_DBG(gsi->ipc, false, gsi->dev,
  1231. "%s: timeout :%d\n", __func__, timeout);
  1232. return timeout;
  1233. }
  1234. EXPORT_SYMBOL_GPL(gsi_common_tx_tre_optimization);
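/*
 * Usage sketch (illustrative only, not part of this driver): a protocol
 * driver that has queued num_xfers messages could call the helper above to
 * keep the HW fed while completions arrive. The XFER_TIMEOUT_MS macro and
 * the surrounding variables are assumptions made for this example.
 *
 *	ret = gsi_common_tx_tre_optimization(gsi, num_xfers, num_msg_per_irq,
 *					     msecs_to_jiffies(XFER_TIMEOUT_MS),
 *					     wrapper_dev);
 *	if (!ret)
 *		dev_err(gsi->dev, "%s: transfer timed out\n", __func__);
 *
 * A zero return mirrors wait_for_completion_timeout(): the expected number
 * of completion interrupts did not arrive within xfer_timeout.
 */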
  1235. /**
1236. * gsi_common_ev_cb() - gsi common event callback
1237. * @ch: Pointer to the dma channel
1238. * @cb_str: Pointer to the gpi callback event structure
1239. * @ptr: Pointer to the gsi common structure
  1240. *
  1241. * Return: None
  1242. */
  1243. static void gsi_common_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str, void *ptr)
  1244. {
  1245. struct gsi_common *gsi = ptr;
  1246. if (!gsi) {
  1247. pr_err("%s: Invalid ev_cb buffer\n", __func__);
  1248. return;
  1249. }
  1250. GSI_SE_DBG(gsi->ipc, false, gsi->dev, "%s: protocol:%d\n", __func__, gsi->protocol);
  1251. gsi->ev_cb_fun(ch, cb_str, ptr);
  1252. }
  1253. /**
  1254. * gsi_common_tx_cb() - gsi common tx callback
1255. * @ptr: Pointer to the tx callback parameter; its userdata holds the gsi common structure
  1256. *
  1257. * Return: None
  1258. */
  1259. static void gsi_common_tx_cb(void *ptr)
  1260. {
  1261. struct msm_gpi_dma_async_tx_cb_param *tx_cb = ptr;
  1262. struct gsi_common *gsi;
  1263. if (!(tx_cb && tx_cb->userdata)) {
  1264. pr_err("%s: Invalid tx_cb buffer\n", __func__);
  1265. return;
  1266. }
  1267. gsi = (struct gsi_common *)tx_cb->userdata;
  1268. GSI_SE_DBG(gsi->ipc, false, gsi->dev, "%s: protocol:%d\n", __func__, gsi->protocol);
  1269. gsi->tx.cb_fun(tx_cb);
  1270. }
  1271. /**
  1272. * gsi_common_rx_cb() - gsi common rx callback
1273. * @ptr: Pointer to the rx callback parameter; its userdata holds the gsi common structure
  1274. *
  1275. * Return: None
  1276. */
  1277. static void gsi_common_rx_cb(void *ptr)
  1278. {
  1279. struct msm_gpi_dma_async_tx_cb_param *rx_cb = ptr;
  1280. struct gsi_common *gsi;
  1281. if (!(rx_cb && rx_cb->userdata)) {
  1282. pr_err("%s: Invalid rx_cb buffer\n", __func__);
  1283. return;
  1284. }
  1285. gsi = (struct gsi_common *)rx_cb->userdata;
  1286. gsi->rx.cb_fun(rx_cb);
  1287. GSI_SE_DBG(gsi->ipc, false, gsi->dev, "%s: protocol:%d\n", __func__, gsi->protocol);
  1288. }
  1289. /**
1290. * gsi_common_clear_tre_indexes() - Clear the gsi common TRE queue indexes
1291. * @gsi_q: Base address of the gsi common TRE queue
  1292. *
  1293. * Return: None
  1294. */
  1295. void gsi_common_clear_tre_indexes(struct gsi_tre_queue *gsi_q)
  1296. {
  1297. gsi_q->msg_cnt = 0;
  1298. gsi_q->unmap_msg_cnt = 0;
  1299. gsi_q->freed_msg_cnt = 0;
  1300. atomic_set(&gsi_q->irq_cnt, 0);
  1301. }
  1302. EXPORT_SYMBOL_GPL(gsi_common_clear_tre_indexes);
  1303. /**
1304. * gsi_common_fill_tre_buf() - Fill the TRE scatterlist for a transfer
1305. * @gsi: Base address of gsi common
1306. * @tx_chan: true for the tx channel, false for the rx channel
1307. *
1308. * Return: Number of TREs queued in the scatterlist
  1309. */
  1310. int gsi_common_fill_tre_buf(struct gsi_common *gsi, bool tx_chan)
  1311. {
  1312. struct gsi_xfer_param *xfer;
  1313. int tre_cnt = 0, i;
  1314. int index = 0;
  1315. if (tx_chan)
  1316. xfer = &gsi->tx;
  1317. else
  1318. xfer = &gsi->rx;
  1319. for (i = 0; i < GSI_MAX_TRE_TYPES; i++) {
  1320. if (xfer->tre.flags & (1 << i))
  1321. tre_cnt++;
  1322. }
  1323. sg_init_table(xfer->sg, tre_cnt);
  1324. if (xfer->tre.flags & LOCK_TRE_SET)
  1325. sg_set_buf(&xfer->sg[index++], &xfer->tre.lock_t, sizeof(xfer->tre.lock_t));
  1326. if (xfer->tre.flags & CONFIG_TRE_SET)
  1327. sg_set_buf(&xfer->sg[index++], &xfer->tre.config_t, sizeof(xfer->tre.config_t));
  1328. if (xfer->tre.flags & GO_TRE_SET)
  1329. sg_set_buf(&xfer->sg[index++], &xfer->tre.go_t, sizeof(xfer->tre.go_t));
  1330. if (xfer->tre.flags & DMA_TRE_SET)
  1331. sg_set_buf(&xfer->sg[index++], &xfer->tre.dma_t, sizeof(xfer->tre.dma_t));
  1332. if (xfer->tre.flags & UNLOCK_TRE_SET)
  1333. sg_set_buf(&xfer->sg[index++], &xfer->tre.unlock_t, sizeof(xfer->tre.unlock_t));
  1334. GSI_SE_DBG(gsi->ipc, false, gsi->dev, "%s: tre_cnt:%d chan:%d flags:0x%x\n",
  1335. __func__, tre_cnt, tx_chan, xfer->tre.flags);
  1336. return tre_cnt;
  1337. }
  1338. EXPORT_SYMBOL_GPL(gsi_common_fill_tre_buf);
  1339. /**
1340. * gsi_common_doorbell_hit() - Submit the descriptor and ring the GSI doorbell
1341. * @gsi: Base address of gsi common
1342. * @tx_chan: true for the tx channel, false for the rx channel
1343. *
1344. * Return: 0 on success, negative error code on failure
  1345. */
  1346. static int gsi_common_doorbell_hit(struct gsi_common *gsi, bool tx_chan)
  1347. {
  1348. dma_cookie_t cookie;
  1349. struct dma_async_tx_descriptor *dma_desc;
  1350. struct dma_chan *dma_ch;
  1351. if (tx_chan) {
  1352. dma_desc = gsi->tx.desc;
  1353. dma_ch = gsi->tx.ch;
  1354. } else {
  1355. dma_desc = gsi->rx.desc;
  1356. dma_ch = gsi->rx.ch;
  1357. }
  1358. reinit_completion(gsi->xfer);
  1359. cookie = dmaengine_submit(dma_desc);
  1360. if (dma_submit_error(cookie)) {
  1361. GSI_SE_ERR(gsi->ipc, true, gsi->dev,
  1362. "%s: dmaengine_submit failed (%d)\n", __func__, cookie);
  1363. return -EINVAL;
  1364. }
  1365. dma_async_issue_pending(dma_ch);
  1366. return 0;
  1367. }
  1368. /**
1369. * gsi_common_prep_desc_and_submit() - Prepare the dma descriptor and submit it to GSI
1370. * @gsi: Base address of gsi common
1371. * @segs: Number of scatterlist segments
1372. * @tx_chan: true for the tx channel, false for the rx channel
1373. * @skip_callbacks: when true, do not register completion callbacks
1374. *
1375. * Return: 0 on success, negative error code on failure
  1376. */
  1377. int gsi_common_prep_desc_and_submit(struct gsi_common *gsi, int segs, bool tx_chan,
  1378. bool skip_callbacks)
  1379. {
  1380. struct gsi_xfer_param *xfer;
  1381. struct dma_async_tx_descriptor *geni_desc = NULL;
  1382. /* tx channel process */
  1383. if (tx_chan) {
  1384. xfer = &gsi->tx;
  1385. geni_desc = dmaengine_prep_slave_sg(gsi->tx.ch, gsi->tx.sg, segs, DMA_MEM_TO_DEV,
  1386. (DMA_PREP_INTERRUPT | DMA_CTRL_ACK));
  1387. if (!geni_desc) {
  1388. GSI_SE_ERR(gsi->ipc, true, gsi->dev, "prep_slave_sg for tx failed\n");
  1389. return -ENOMEM;
  1390. }
  1391. if (skip_callbacks) {
  1392. geni_desc->callback = NULL;
  1393. geni_desc->callback_param = NULL;
  1394. } else {
  1395. geni_desc->callback = gsi_common_tx_cb;
  1396. geni_desc->callback_param = &gsi->tx.cb;
  1397. }
  1398. gsi->tx.desc = geni_desc;
  1399. return gsi_common_doorbell_hit(gsi, tx_chan);
  1400. }
  1401. /* Rx channel process */
  1402. geni_desc = dmaengine_prep_slave_sg(gsi->rx.ch, gsi->rx.sg, segs, DMA_DEV_TO_MEM,
  1403. (DMA_PREP_INTERRUPT | DMA_CTRL_ACK));
  1404. if (!geni_desc) {
  1405. GSI_SE_ERR(gsi->ipc, true, gsi->dev, "prep_slave_sg for rx failed\n");
  1406. return -ENOMEM;
  1407. }
  1408. geni_desc->callback = gsi_common_rx_cb;
  1409. geni_desc->callback_param = &gsi->rx.cb;
  1410. gsi->rx.desc = geni_desc;
  1411. return gsi_common_doorbell_hit(gsi, tx_chan);
  1412. }
  1413. EXPORT_SYMBOL_GPL(gsi_common_prep_desc_and_submit);
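/*
 * Usage sketch (illustrative only, not part of this driver): a typical TX
 * submission path using the helpers above - select the TREs, build the
 * scatterlist, then prepare and submit the descriptor. It assumes the
 * caller has already populated gsi->tx.tre.config_t/go_t/dma_t; the flag
 * combination shown is only an example.
 *
 *	int segs, ret;
 *
 *	gsi->tx.tre.flags = CONFIG_TRE_SET | GO_TRE_SET | DMA_TRE_SET;
 *	segs = gsi_common_fill_tre_buf(gsi, true);
 *	ret = gsi_common_prep_desc_and_submit(gsi, segs, true, false);
 *	if (ret)
 *		return ret;
 */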
  1414. /**
1415. * geni_gsi_common_request_channel() - Request and configure the gsi common dma channels
1416. * @gsi: Base address of gsi common
1417. *
1418. * Return: 0 on success, negative error code on failure
  1419. */
  1420. int geni_gsi_common_request_channel(struct gsi_common *gsi)
  1421. {
  1422. int ret = 0;
  1423. if (!gsi->tx.ch) {
  1424. gsi->tx.ch = dma_request_slave_channel(gsi->dev, "tx");
  1425. if (!gsi->tx.ch) {
  1426. GSI_SE_ERR(gsi->ipc, true, gsi->dev, "tx dma req slv chan ret:%d\n", -EIO);
  1427. return -EIO;
  1428. }
  1429. }
  1430. if (!gsi->rx.ch) {
  1431. gsi->rx.ch = dma_request_slave_channel(gsi->dev, "rx");
  1432. if (!gsi->rx.ch) {
  1433. GSI_SE_ERR(gsi->ipc, true, gsi->dev, "rx dma req slv chan ret:%d\n", -EIO);
  1434. dma_release_channel(gsi->tx.ch);
  1435. return -EIO;
  1436. }
  1437. }
  1438. gsi->tx.ev.init.callback = gsi_common_ev_cb;
  1439. gsi->tx.ev.init.cb_param = gsi;
  1440. gsi->tx.ev.cmd = MSM_GPI_INIT;
  1441. gsi->tx.ch->private = &gsi->tx.ev;
  1442. ret = dmaengine_slave_config(gsi->tx.ch, NULL);
  1443. if (ret) {
  1444. GSI_SE_ERR(gsi->ipc, true, gsi->dev, "tx dma slave config ret:%d\n", ret);
  1445. goto dmaengine_slave_config_fail;
  1446. }
  1447. gsi->rx.ev.init.cb_param = gsi;
  1448. gsi->rx.ev.init.callback = gsi_common_ev_cb;
  1449. gsi->rx.ev.cmd = MSM_GPI_INIT;
  1450. gsi->rx.ch->private = &gsi->rx.ev;
  1451. ret = dmaengine_slave_config(gsi->rx.ch, NULL);
  1452. if (ret) {
  1453. GSI_SE_ERR(gsi->ipc, true, gsi->dev, "rx dma slave config ret:%d\n", ret);
  1454. goto dmaengine_slave_config_fail;
  1455. }
  1456. gsi->tx.cb.userdata = gsi;
  1457. gsi->rx.cb.userdata = gsi;
  1458. gsi->req_chan = true;
  1459. return ret;
  1460. dmaengine_slave_config_fail:
  1461. dma_release_channel(gsi->tx.ch);
  1462. dma_release_channel(gsi->rx.ch);
  1463. gsi->tx.ch = NULL;
  1464. gsi->rx.ch = NULL;
  1465. return ret;
  1466. }
  1467. EXPORT_SYMBOL_GPL(geni_gsi_common_request_channel);
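/*
 * Usage sketch (illustrative only, not part of this driver): a serial
 * engine driver would normally request the GSI channels once, e.g. on the
 * first transfer, and reuse them afterwards. It assumes gsi->dev, the ipc
 * log handle and gsi->ev_cb_fun were initialized by the caller beforehand.
 *
 *	if (!gsi->req_chan) {
 *		ret = geni_gsi_common_request_channel(gsi);
 *		if (ret)
 *			return ret;
 *	}
 */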
  1468. /* Sends gpii event or channel command */
  1469. static int gpi_send_cmd(struct gpii *gpii,
  1470. struct gpii_chan *gpii_chan,
  1471. enum gpi_cmd gpi_cmd)
  1472. {
  1473. u32 chid = MAX_CHANNELS_PER_GPII;
  1474. u32 cmd;
  1475. u32 offset, irq_stat;
  1476. unsigned long timeout;
  1477. void __iomem *cmd_reg;
  1478. if (gpi_cmd >= GPI_MAX_CMD)
  1479. return -EINVAL;
  1480. if (IS_CHAN_CMD(gpi_cmd))
  1481. chid = gpii_chan->chid;
  1482. GPII_INFO(gpii, chid,
  1483. "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
  1484. /* send opcode and wait for completion */
  1485. reinit_completion(&gpii->cmd_completion);
  1486. gpii->gpi_cmd = gpi_cmd;
  1487. cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
  1488. gpii->ev_cmd_reg;
  1489. cmd = IS_CHAN_CMD(gpi_cmd) ?
  1490. GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
  1491. GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
  1492. gpi_write_reg(gpii, cmd_reg, cmd);
  1493. timeout = wait_for_completion_timeout(&gpii->cmd_completion,
  1494. msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
  1495. if (!timeout) {
  1496. offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
  1497. irq_stat = gpi_read_reg(gpii, gpii->regs + offset);
  1498. GPII_ERR(gpii, chid,
  1499. "cmd: %s completion timeout irq_status=0x%x\n",
  1500. TO_GPI_CMD_STR(gpi_cmd), irq_stat);
  1501. return -EIO;
  1502. }
1503. /* if the cmd is a state change cmd, confirm the new ch state is correct */
  1504. if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
  1505. return 0;
  1506. if (IS_CHAN_CMD(gpi_cmd) &&
  1507. gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
  1508. return 0;
  1509. if (!IS_CHAN_CMD(gpi_cmd) &&
  1510. gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
  1511. return 0;
  1512. return -EIO;
  1513. }
  1514. /*
1515. * geni_gsi_ch_start() - gsi channel commands to start the GSI RX and TX channels
  1516. *
  1517. * @chan: gsi channel handle
  1518. *
  1519. * Return: Returns success or failure
  1520. */
  1521. int geni_gsi_ch_start(struct dma_chan *chan)
  1522. {
  1523. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  1524. struct gpii *gpii = gpii_chan->gpii;
  1525. int i, ret = 0;
  1526. GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
  1527. mutex_lock(&gpii->ctrl_lock);
  1528. for (i = 1; i >= 0; i--) {
  1529. gpii_chan = &gpii->gpii_chan[i];
  1530. GPII_INFO(gpii, gpii_chan->chid, "Start chan:%d\n", i);
  1531. /* send start command to start the channels */
  1532. ret = gpi_start_chan(gpii_chan);
  1533. if (ret) {
  1534. GPII_ERR(gpii, gpii_chan->chid,
  1535. "Error Starting Channel ret:%d\n", ret);
  1536. mutex_unlock(&gpii->ctrl_lock);
  1537. return -ECONNRESET;
  1538. }
  1539. }
  1540. mutex_unlock(&gpii->ctrl_lock);
  1541. return ret;
  1542. }
  1543. EXPORT_SYMBOL_GPL(geni_gsi_ch_start);
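/*
 * Usage sketch (illustrative only, not part of this driver): a client that
 * stopped the channels (e.g. across suspend) could restart both of them
 * with a single call; chan below is assumed to be the tx or rx dma channel
 * handle the client obtained earlier.
 *
 *	ret = geni_gsi_ch_start(chan);
 *	if (ret)
 *		dev_err(dev, "failed to restart GSI channels: %d\n", ret);
 */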
  1544. /*
1545. * gpi_terminate_channel() - Stop the given gpi channel and,
1546. * if the stop fails, reset the channel
1547. * @gpii_chan: gpi channel handle
  1548. *
  1549. * Return: Returns success or failure
  1550. */
  1551. int gpi_terminate_channel(struct gpii_chan *gpii_chan)
  1552. {
  1553. struct gpii *gpii = gpii_chan->gpii;
  1554. int ret = 0;
  1555. mutex_lock(&gpii->ctrl_lock);
  1556. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
  1557. if (ret) {
  1558. GPII_ERR(gpii, gpii_chan->chid,
  1559. "Error Stopping Chan:%d,resetting\n", ret);
  1560. /* If STOP cmd fails, send command to Reset the channel */
  1561. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
  1562. if (ret)
  1563. GPII_ERR(gpii, gpii_chan->chid,
  1564. "error resetting channel:%d\n", ret);
  1565. }
  1566. mutex_unlock(&gpii->ctrl_lock);
  1567. return ret;
  1568. }
  1569. /*
1570. * geni_gsi_disconnect_doorbell_stop_ch() - Disconnect the gsi doorbell and optionally stop the channels
  1571. * @chan: gsi channel handle
  1572. * @stop_ch: stop channel if set to true
  1573. *
  1574. * Return: Returns success or failure
  1575. */
  1576. int geni_gsi_disconnect_doorbell_stop_ch(struct dma_chan *chan, bool stop_ch)
  1577. {
  1578. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  1579. struct gpii *gpii = gpii_chan->gpii;
  1580. int ret = 0;
  1581. bool error = false;
  1582. /*
1583. * Use asynchronous channel command 49 (see section 3.10.7) to disconnect
1584. * the io_6 input from the GSI interrupt input.
  1585. */
  1586. GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
  1587. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_DISABLE_HID);
  1588. if (ret) {
  1589. GPII_ERR(gpii, gpii_chan->chid,
  1590. "Error disable Chan:%d HID interrupt\n", ret);
  1591. error = true;
  1592. gpi_dump_debug_reg(gpii);
  1593. }
  1594. /* Disconnect only doorbell & free Rx chan desc */
  1595. if (!stop_ch) {
  1596. GPII_VERB(gpii, gpii_chan->chid, "Free RX chan desc\n");
  1597. gpi_free_chan_desc(&gpii->gpii_chan[1]);
  1598. return ret;
  1599. }
  1600. /* Stop RX channel */
  1601. GPII_INFO(gpii, gpii_chan->chid, "Stop RX chan\n");
  1602. ret = gpi_terminate_channel(&gpii->gpii_chan[1]);
  1603. if (ret) {
  1604. GPII_ERR(gpii, gpii_chan->chid,
  1605. "Error Stopping RX Chan:%d\n", ret);
  1606. error = true;
  1607. gpi_dump_debug_reg(gpii);
  1608. }
  1609. GPII_VERB(gpii, gpii_chan->chid, "Free RX chan desc\n");
  1610. gpi_free_chan_desc(&gpii->gpii_chan[1]);
  1611. /* Stop TX channel */
  1612. GPII_INFO(gpii, gpii_chan->chid, "Stop TX chan\n");
  1613. ret = gpi_terminate_channel(&gpii->gpii_chan[0]);
  1614. if (ret) {
  1615. GPII_ERR(gpii, gpii_chan->chid,
  1616. "Error Stopping TX Chan:%d\n", ret);
  1617. error = true;
  1618. gpi_dump_debug_reg(gpii);
  1619. }
  1620. GPII_VERB(gpii, gpii_chan->chid, "End\n");
  1621. if (error)
  1622. return -EBUSY;
  1623. return ret;
  1624. }
  1625. EXPORT_SYMBOL_GPL(geni_gsi_disconnect_doorbell_stop_ch);
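/*
 * Usage sketch (illustrative only, not part of this driver): a Q2SPI style
 * client could use the helper above in two stages - first disconnect only
 * the doorbell, and later stop the channels as well. rx_chan is assumed to
 * be the client's rx dma channel handle.
 *
 *	ret = geni_gsi_disconnect_doorbell_stop_ch(rx_chan, false);	(doorbell only)
 *	ret = geni_gsi_disconnect_doorbell_stop_ch(rx_chan, true);	(doorbell + RX/TX channel stop)
 */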
  1626. /* program transfer ring DB register */
  1627. static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
  1628. struct gpi_ring *ring,
  1629. void *wp)
  1630. {
  1631. struct gpii *gpii = gpii_chan->gpii;
  1632. phys_addr_t p_wp;
  1633. p_wp = to_physical(ring, wp);
  1634. gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
  1635. }
  1636. /* program event ring DB register */
  1637. static inline void gpi_write_ev_db(struct gpii *gpii,
  1638. struct gpi_ring *ring,
  1639. void *wp)
  1640. {
  1641. phys_addr_t p_wp;
  1642. p_wp = ring->phys_addr + (wp - ring->base);
  1643. gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
  1644. }
  1645. /* notify client with generic event */
  1646. static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
  1647. enum msm_gpi_cb_event event,
  1648. u64 status)
  1649. {
  1650. struct gpii *gpii = gpii_chan->gpii;
  1651. struct gpi_client_info *client_info = &gpii_chan->client_info;
  1652. struct msm_gpi_cb msm_gpi_cb = {0};
  1653. GPII_ERR(gpii, gpii_chan->chid,
  1654. "notifying event:%s with status:%llu\n",
  1655. TO_GPI_CB_EVENT_STR(event), status);
  1656. msm_gpi_cb.cb_event = event;
  1657. msm_gpi_cb.status = status;
  1658. msm_gpi_cb.timestamp = sched_clock();
  1659. client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
  1660. client_info->cb_param);
  1661. }
  1662. /* process transfer completion interrupt */
  1663. static void gpi_process_ieob(struct gpii *gpii)
  1664. {
  1665. gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
  1666. /* process events based on priority */
  1667. if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
  1668. GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
  1669. gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
  1670. tasklet_schedule(&gpii->ev_task);
  1671. } else {
  1672. GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
  1673. gpi_process_events(gpii);
  1674. }
  1675. }
  1676. /* process channel control interrupt */
  1677. static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
  1678. {
  1679. u32 gpii_id = gpii->gpii_id;
  1680. u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
  1681. u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
  1682. u32 chid;
  1683. struct gpii_chan *gpii_chan;
  1684. u32 state;
  1685. /* clear the status */
  1686. offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
  1687. gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
  1688. for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
  1689. if (!(BIT(chid) & ch_irq))
  1690. continue;
  1691. gpii_chan = &gpii->gpii_chan[chid];
  1692. GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
  1693. state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
  1694. CNTXT_0_CONFIG);
  1695. state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
  1696. GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
  1697. /*
1698. * The CH_CMD_DEALLOC cmd is always successful. However, the cmd
1699. * does not change the hardware state, so overwrite the software
1700. * state with the default state.
  1701. */
  1702. if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
  1703. state = DEFAULT_CH_STATE;
  1704. else if (gpii->gpi_cmd == GPI_CH_CMD_ENABLE_HID)
  1705. state = CH_STATE_ENABLE_HID;
  1706. else if (gpii->gpi_cmd == GPI_CH_CMD_DISABLE_HID)
  1707. state = CH_STATE_DISABLE_HID;
  1708. gpii_chan->ch_state = state;
  1709. GPII_VERB(gpii, chid, "setting channel to state:%s\n",
  1710. TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
  1711. complete_all(&gpii->cmd_completion);
  1712. /* notifying clients if in error state */
  1713. if (gpii_chan->ch_state == CH_STATE_ERROR)
  1714. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
  1715. __LINE__);
  1716. }
  1717. }
  1718. /* processing gpi general error interrupts */
  1719. static void gpi_process_gen_err_irq(struct gpii *gpii)
  1720. {
  1721. u32 gpii_id = gpii->gpii_id;
  1722. u32 offset = GPI_GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id);
  1723. u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
  1724. u32 chid;
  1725. struct gpii_chan *gpii_chan;
1726. /* log the status */
  1727. GPII_ERR(gpii, GPI_DBG_COMMON, "irq_stts:0x%x\n", irq_stts);
  1728. /* Notify the client about error */
  1729. for (chid = 0, gpii_chan = gpii->gpii_chan;
  1730. chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
  1731. if (gpii_chan->client_info.callback)
  1732. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
  1733. irq_stts);
  1734. /* Clear the register */
  1735. offset = GPI_GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id);
  1736. gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
  1737. gpii->dbg_gpi_irq_cnt++;
  1738. if (!gpii->reg_table_dump) {
  1739. gpi_dump_debug_reg(gpii);
  1740. gpii->reg_table_dump = true;
  1741. }
  1742. }
1743. /* processing gpi global error interrupts */
  1744. static void gpi_process_glob_err_irq(struct gpii *gpii)
  1745. {
  1746. u32 gpii_id = gpii->gpii_id;
  1747. u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
  1748. u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
  1749. u32 error_log;
  1750. u32 chid;
  1751. struct gpii_chan *gpii_chan;
  1752. struct gpi_client_info *client_info;
  1753. struct msm_gpi_cb msm_gpi_cb;
  1754. struct gpi_error_log_entry *log_entry =
  1755. (struct gpi_error_log_entry *)&error_log;
  1756. offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
  1757. gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
  1758. /* only error interrupt should be set */
  1759. if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
  1760. GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
  1761. irq_stts);
  1762. goto error_irq;
  1763. }
  1764. offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
  1765. error_log = gpi_read_reg(gpii, gpii->regs + offset);
  1766. gpi_write_reg(gpii, gpii->regs + offset, 0);
  1767. /* get channel info */
  1768. chid = ((struct gpi_error_log_entry *)&error_log)->chid;
  1769. if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
  1770. GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
  1771. chid);
  1772. goto error_irq;
  1773. }
  1774. gpii_chan = &gpii->gpii_chan[chid];
  1775. client_info = &gpii_chan->client_info;
  1776. /* notify client with error log */
  1777. msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
  1778. msm_gpi_cb.error_log.routine = log_entry->routine;
  1779. msm_gpi_cb.error_log.type = log_entry->type;
  1780. msm_gpi_cb.error_log.error_code = log_entry->code;
  1781. GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
  1782. TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
  1783. GPII_ERR(gpii, gpii_chan->chid,
  1784. "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
  1785. log_entry->ee, log_entry->chtype,
  1786. msm_gpi_cb.error_log.routine,
  1787. msm_gpi_cb.error_log.type,
  1788. msm_gpi_cb.error_log.error_code);
  1789. client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
  1790. client_info->cb_param);
  1791. return;
  1792. error_irq:
  1793. for (chid = 0, gpii_chan = gpii->gpii_chan;
  1794. chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
  1795. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
  1796. irq_stts);
  1797. }
  1798. /* gpii interrupt handler */
  1799. static irqreturn_t gpi_handle_irq(int irq, void *data)
  1800. {
  1801. struct gpii *gpii = data;
  1802. u32 type;
  1803. unsigned long flags;
  1804. u32 offset;
  1805. u32 gpii_id = gpii->gpii_id;
  1806. GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
  1807. gpii->dual_ee_sync_flag = true;
  1808. read_lock_irqsave(&gpii->pm_lock, flags);
  1809. /*
1810. * Receiving an interrupt while the software state is DISABLED
1811. * means the states are out of sync; bail out.
  1812. */
  1813. if (!REG_ACCESS_VALID(gpii->pm_state)) {
  1814. GPII_CRITIC(gpii, GPI_DBG_COMMON,
  1815. "receive interrupt while in %s state\n",
  1816. TO_GPI_PM_STR(gpii->pm_state));
  1817. goto exit_irq;
  1818. }
  1819. offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
  1820. type = gpi_read_reg(gpii, gpii->regs + offset);
  1821. do {
  1822. GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
  1823. type);
  1824. /* global gpii error */
  1825. if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
  1826. GPII_ERR(gpii, GPI_DBG_COMMON,
  1827. "processing global error irq\n");
  1828. gpi_process_glob_err_irq(gpii);
  1829. type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
  1830. }
  1831. /* transfer complete interrupt */
  1832. if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
  1833. GPII_VERB(gpii, GPI_DBG_COMMON,
  1834. "process IEOB interrupts\n");
  1835. gpi_process_ieob(gpii);
  1836. type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
  1837. }
  1838. /* event control irq */
  1839. if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
  1840. u32 ev_state;
  1841. u32 ev_ch_irq;
  1842. GPII_INFO(gpii, GPI_DBG_COMMON,
  1843. "processing EV CTRL interrupt\n");
  1844. offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
  1845. ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
  1846. offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
  1847. (gpii_id);
  1848. gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
  1849. ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
  1850. CNTXT_0_CONFIG);
  1851. ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
  1852. ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
  1853. /*
1854. * The EV_CMD_DEALLOC cmd is always successful. However,
1855. * the cmd does not change the hardware state, so overwrite
1856. * the software state with the default state.
  1857. */
  1858. if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
  1859. ev_state = DEFAULT_EV_CH_STATE;
  1860. gpii->ev_state = ev_state;
  1861. GPII_INFO(gpii, GPI_DBG_COMMON,
  1862. "setting EV state to %s\n",
  1863. TO_GPI_EV_STATE_STR(gpii->ev_state));
  1864. complete_all(&gpii->cmd_completion);
  1865. type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
  1866. }
  1867. /* channel control irq */
  1868. if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
  1869. GPII_INFO(gpii, GPI_DBG_COMMON,
  1870. "process CH CTRL interrupts\n");
  1871. gpi_process_ch_ctrl_irq(gpii);
  1872. type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
  1873. }
  1874. if (type) {
  1875. GPII_CRITIC(gpii, GPI_DBG_COMMON,
  1876. "Unhandled interrupt status:0x%x\n", type);
  1877. gpi_process_gen_err_irq(gpii);
  1878. goto exit_irq;
  1879. }
  1880. offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
  1881. type = gpi_read_reg(gpii, gpii->regs + offset);
  1882. } while (type);
  1883. exit_irq:
  1884. read_unlock_irqrestore(&gpii->pm_lock, flags);
  1885. GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
  1886. gpii->dual_ee_sync_flag = false;
  1887. return IRQ_HANDLED;
  1888. }
  1889. /* process qup notification events */
  1890. static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
  1891. struct qup_notif_event *notif_event)
  1892. {
  1893. struct gpi_client_info *client_info = &gpii_chan->client_info;
  1894. struct msm_gpi_cb msm_gpi_cb;
  1895. GPII_VERB(gpii_chan->gpii, gpii_chan->chid,
  1896. "status:0x%x time:0x%x count:0x%x\n",
  1897. notif_event->status, notif_event->time, notif_event->count);
  1898. msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
  1899. msm_gpi_cb.status = notif_event->status;
  1900. msm_gpi_cb.timestamp = notif_event->time;
  1901. msm_gpi_cb.count = notif_event->count;
  1902. GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "sending CB event:%s\n",
  1903. TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
  1904. client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
  1905. client_info->cb_param);
  1906. }
  1907. /* free gpi_desc for the specified channel */
  1908. static void gpi_free_chan_desc(struct gpii_chan *gpii_chan)
  1909. {
  1910. struct virt_dma_desc *vd;
  1911. struct gpi_desc *gpi_desc;
  1912. unsigned long flags;
  1913. GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "Enter\n");
  1914. spin_lock_irqsave(&gpii_chan->vc.lock, flags);
  1915. vd = vchan_next_desc(&gpii_chan->vc);
  1916. if (!vd) {
  1917. GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "vd is NULL!!!\n");
  1918. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  1919. return;
  1920. }
  1921. gpi_desc = to_gpi_desc(vd);
  1922. list_del(&vd->node);
  1923. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  1924. kfree(gpi_desc);
  1925. gpi_desc = NULL;
  1926. }
  1927. /* process DMA Immediate completion data events */
  1928. static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
  1929. struct immediate_data_event *imed_event)
  1930. {
  1931. struct gpii *gpii = gpii_chan->gpii;
  1932. struct gpi_ring *ch_ring = gpii_chan->ch_ring;
  1933. struct virt_dma_desc *vd;
  1934. struct gpi_desc *gpi_desc;
  1935. void *tre = ch_ring->base +
  1936. (ch_ring->el_size * imed_event->tre_index);
  1937. struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
  1938. unsigned long flags;
  1939. u32 chid;
  1940. struct gpii_chan *gpii_tx_chan = &gpii->gpii_chan[GPI_TX_CHAN];
  1941. /*
1942. * If the channel is not active, don't process the event, but let
1943. * the client know a pending event is available.
  1944. */
  1945. if (gpii_chan->pm_state != ACTIVE_STATE) {
  1946. GPII_ERR(gpii, gpii_chan->chid,
  1947. "skipping processing event because ch @ %s state\n",
  1948. TO_GPI_PM_STR(gpii_chan->pm_state));
  1949. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
  1950. __LINE__);
  1951. return;
  1952. }
  1953. spin_lock_irqsave(&gpii_chan->vc.lock, flags);
  1954. vd = vchan_next_desc(&gpii_chan->vc);
  1955. if (!vd) {
  1956. struct gpi_ere *gpi_ere;
  1957. struct msm_gpi_tre *gpi_tre;
  1958. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  1959. GPII_ERR(gpii, gpii_chan->chid,
  1960. "event without a pending descriptor!\n");
  1961. gpi_ere = (struct gpi_ere *)imed_event;
  1962. GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
  1963. gpi_ere->dword[0], gpi_ere->dword[1],
  1964. gpi_ere->dword[2], gpi_ere->dword[3]);
  1965. gpi_tre = tre;
  1966. GPII_ERR(gpii, gpii_chan->chid,
  1967. "Pending TRE: %08x %08x %08x %08x\n",
  1968. gpi_tre->dword[0], gpi_tre->dword[1],
  1969. gpi_tre->dword[2], gpi_tre->dword[3]);
  1970. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
  1971. __LINE__);
  1972. return;
  1973. }
  1974. gpi_desc = to_gpi_desc(vd);
  1975. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  1976. /*
1977. * The RP reported by the event points to the last TRE processed;
1978. * update the ring rp to tre + 1.
  1979. */
  1980. tre += ch_ring->el_size;
  1981. if (tre >= (ch_ring->base + ch_ring->len))
  1982. tre = ch_ring->base;
  1983. ch_ring->rp = tre;
  1984. /* make sure rp updates are immediately visible to all cores */
  1985. smp_wmb();
  1986. /*
  1987. * If unlock tre is present, don't send transfer callback on
  1988. * IEOT, wait for unlock IEOB. Free the respective channel
  1989. * descriptors.
  1990. * If unlock is not present, IEOB indicates freeing the descriptor
  1991. * and IEOT indicates channel transfer completion.
  1992. */
  1993. chid = imed_event->chid;
  1994. if (gpii->unlock_tre_set) {
  1995. if (chid == GPI_RX_CHAN) {
  1996. if (imed_event->code == MSM_GPI_TCE_EOT)
  1997. goto gpi_free_desc;
  1998. else if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
  1999. /*
2000. * On an error in a read transfer on a shared se, the
2001. * unlock tre will not be processed because the channels
2002. * go to a bad state, so the tx desc must be freed
2003. * manually.
  2004. */
  2005. gpi_free_chan_desc(gpii_tx_chan);
  2006. else
  2007. return;
  2008. } else if (imed_event->code == MSM_GPI_TCE_EOT) {
  2009. return;
  2010. }
  2011. } else if (imed_event->code == MSM_GPI_TCE_EOB) {
  2012. goto gpi_free_desc;
  2013. }
  2014. tx_cb_param = vd->tx.callback_param;
  2015. if (vd->tx.callback && tx_cb_param) {
  2016. struct msm_gpi_tre *imed_tre = &tx_cb_param->imed_tre;
  2017. GPII_VERB(gpii, gpii_chan->chid,
  2018. "cb_length:%u compl_code:0x%x status:0x%x\n",
  2019. imed_event->length, imed_event->code,
  2020. imed_event->status);
  2021. /* Update immediate data if any from event */
  2022. *imed_tre = *((struct msm_gpi_tre *)imed_event);
  2023. tx_cb_param->length = imed_event->length;
  2024. tx_cb_param->completion_code = imed_event->code;
  2025. tx_cb_param->status = imed_event->status;
  2026. vd->tx.callback(tx_cb_param);
  2027. }
  2028. gpi_free_desc:
  2029. gpi_free_chan_desc(gpii_chan);
  2030. }
  2031. /* processing transfer completion events */
  2032. static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
  2033. struct xfer_compl_event *compl_event)
  2034. {
  2035. struct gpii *gpii = gpii_chan->gpii;
  2036. struct gpi_ring *ch_ring = gpii_chan->ch_ring;
  2037. void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
  2038. struct virt_dma_desc *vd;
  2039. struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
  2040. struct gpi_desc *gpi_desc;
  2041. unsigned long flags;
  2042. u32 chid;
  2043. struct gpii_chan *gpii_tx_chan = &gpii->gpii_chan[GPI_TX_CHAN];
  2044. /* only process events on active channel */
  2045. if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
  2046. GPII_ERR(gpii, gpii_chan->chid,
  2047. "skipping processing event because ch @ %s state\n",
  2048. TO_GPI_PM_STR(gpii_chan->pm_state));
  2049. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
  2050. __LINE__);
  2051. return;
  2052. }
  2053. spin_lock_irqsave(&gpii_chan->vc.lock, flags);
  2054. vd = vchan_next_desc(&gpii_chan->vc);
  2055. if (!vd) {
  2056. struct gpi_ere *gpi_ere;
  2057. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  2058. GPII_ERR(gpii, gpii_chan->chid,
  2059. "Event without a pending descriptor!\n");
  2060. gpi_ere = (struct gpi_ere *)compl_event;
  2061. GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
  2062. gpi_ere->dword[0], gpi_ere->dword[1],
  2063. gpi_ere->dword[2], gpi_ere->dword[3]);
  2064. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
  2065. __LINE__);
  2066. return;
  2067. }
  2068. gpi_desc = to_gpi_desc(vd);
  2069. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  2070. /*
2071. * The RP reported by the event points to the last TRE processed;
2072. * update the ring rp to ev_rp + 1.
  2073. */
  2074. ev_rp += ch_ring->el_size;
  2075. if (ev_rp >= (ch_ring->base + ch_ring->len))
  2076. ev_rp = ch_ring->base;
  2077. ch_ring->rp = ev_rp;
  2078. /* update must be visible to other cores */
  2079. smp_wmb();
  2080. /*
  2081. * If unlock tre is present, don't send transfer callback on
  2082. * IEOT, wait for unlock IEOB. Free the respective channel
  2083. * descriptors.
  2084. * If unlock is not present, IEOB indicates freeing the descriptor
  2085. * and IEOT indicates channel transfer completion.
  2086. */
  2087. chid = compl_event->chid;
  2088. if (gpii->unlock_tre_set) {
  2089. if (chid == GPI_RX_CHAN) {
  2090. if (compl_event->code == MSM_GPI_TCE_EOT)
  2091. goto gpi_free_desc;
  2092. else if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR)
  2093. /*
2094. * On an error in a read transfer on a shared se, the
2095. * unlock tre will not be processed because the channels
2096. * go to a bad state, so the tx desc must be freed
2097. * manually.
  2098. */
  2099. gpi_free_chan_desc(gpii_tx_chan);
  2100. else
  2101. return;
  2102. } else if (compl_event->code == MSM_GPI_TCE_EOT) {
  2103. return;
  2104. }
  2105. } else if (compl_event->code == MSM_GPI_TCE_EOB) {
  2106. if (!(gpii_chan->num_tre == 1 && gpii_chan->lock_tre_set)
  2107. && (gpii->protocol != SE_PROTOCOL_UART))
  2108. goto gpi_free_desc;
  2109. }
  2110. tx_cb_param = vd->tx.callback_param;
  2111. if (vd->tx.callback && tx_cb_param) {
  2112. GPII_VERB(gpii, gpii_chan->chid,
  2113. "cb_length:%u compl_code:0x%x status:0x%x\n",
  2114. compl_event->length, compl_event->code,
  2115. compl_event->status);
  2116. tx_cb_param->length = compl_event->length;
  2117. tx_cb_param->completion_code = compl_event->code;
  2118. tx_cb_param->status = compl_event->status;
  2119. tx_cb_param->tce_type = compl_event->type;
  2120. GPII_INFO(gpii, gpii_chan->chid, "tx_cb_param:%p\n", tx_cb_param);
  2121. vd->tx.callback(tx_cb_param);
  2122. }
  2123. gpi_free_desc:
  2124. gpi_free_chan_desc(gpii_chan);
  2125. }
  2126. /* process Q2SPI_STATUS TCE notification event */
  2127. static void
  2128. gpi_process_qup_q2spi_status(struct gpii_chan *gpii_chan,
  2129. struct qup_q2spi_status *q2spi_status_event)
  2130. {
  2131. struct gpii *gpii = gpii_chan->gpii;
  2132. struct gpi_ring *ch_ring = gpii_chan->ch_ring;
  2133. void *ev_rp = to_virtual(ch_ring, q2spi_status_event->ptr_l);
  2134. struct virt_dma_desc *vd;
  2135. struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
  2136. struct gpi_desc *gpi_desc;
  2137. unsigned long flags;
  2138. /* only process events on active channel */
  2139. if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
  2140. GPII_ERR(gpii, gpii_chan->chid, "skipping processing event because ch @ %s state\n",
  2141. TO_GPI_PM_STR(gpii_chan->pm_state));
  2142. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT, __LINE__);
  2143. return;
  2144. }
  2145. spin_lock_irqsave(&gpii_chan->vc.lock, flags);
  2146. vd = vchan_next_desc(&gpii_chan->vc);
  2147. if (!vd) {
  2148. struct gpi_ere *gpi_ere;
  2149. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  2150. GPII_ERR(gpii, gpii_chan->chid,
  2151. "Event without a pending descriptor!\n");
  2152. gpi_ere = (struct gpi_ere *)q2spi_status_event;
  2153. GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
  2154. gpi_ere->dword[0], gpi_ere->dword[1],
  2155. gpi_ere->dword[2], gpi_ere->dword[3]);
  2156. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH, __LINE__);
  2157. return;
  2158. }
  2159. gpi_desc = to_gpi_desc(vd);
  2160. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  2161. /*
2162. * The RP reported by the event points to the last TRE processed;
2163. * update the ring rp to ev_rp + 1.
  2164. */
  2165. ev_rp += ch_ring->el_size;
  2166. if (ev_rp >= (ch_ring->base + ch_ring->len))
  2167. ev_rp = ch_ring->base;
  2168. ch_ring->rp = ev_rp;
  2169. /* update must be visible to other cores */
  2170. smp_wmb();
  2171. if (q2spi_status_event->code == MSM_GPI_TCE_EOB) {
  2172. if (gpii->protocol != SE_PROTOCOL_Q2SPI)
  2173. goto gpi_free_desc;
  2174. }
  2175. tx_cb_param = vd->tx.callback_param;
  2176. if (vd->tx.callback && tx_cb_param) {
  2177. GPII_VERB(gpii, gpii_chan->chid,
  2178. "cb_length:%u code:0x%x type:0x%x status:0x%x q2spi_status:0x%x\n",
  2179. q2spi_status_event->length, q2spi_status_event->code,
  2180. q2spi_status_event->type, q2spi_status_event->status,
  2181. q2spi_status_event->value);
  2182. tx_cb_param->length = q2spi_status_event->length;
  2183. tx_cb_param->completion_code = q2spi_status_event->code;
  2184. tx_cb_param->tce_type = q2spi_status_event->type;
  2185. tx_cb_param->status = q2spi_status_event->status;
  2186. tx_cb_param->q2spi_status = q2spi_status_event->value;
  2187. vd->tx.callback(tx_cb_param);
  2188. }
  2189. gpi_free_desc:
  2190. gpi_free_chan_desc(gpii_chan);
  2191. }
  2192. /* process Q2SPI CR Header TCE notification event */
  2193. static void
  2194. gpi_process_xfer_q2spi_cr_header(struct gpii_chan *gpii_chan,
  2195. struct qup_q2spi_cr_header_event *q2spi_cr_header_event)
  2196. {
  2197. struct gpi_client_info *client_info = &gpii_chan->client_info;
  2198. struct gpii *gpii_ptr = NULL;
  2199. struct msm_gpi_cb msm_gpi_cb;
  2200. gpii_ptr = gpii_chan->gpii;
  2201. GPII_VERB(gpii_ptr, gpii_chan->chid,
  2202. "code:0x%x type:0x%x hdr_0:0x%x hrd_1:0x%x hrd_2:0x%x hdr3:0x%x\n",
  2203. q2spi_cr_header_event->code, q2spi_cr_header_event->type,
  2204. q2spi_cr_header_event->cr_hdr[0], q2spi_cr_header_event->cr_hdr[1],
  2205. q2spi_cr_header_event->cr_hdr[2], q2spi_cr_header_event->cr_hdr[3]);
  2206. GPII_VERB(gpii_ptr, gpii_chan->chid,
  2207. "cr_byte_0:0x%x cr_byte_1:0x%x cr_byte_2:0x%x cr_byte_3h:0x%x\n",
  2208. q2spi_cr_header_event->cr_ed_byte[0], q2spi_cr_header_event->cr_ed_byte[1],
  2209. q2spi_cr_header_event->cr_ed_byte[2], q2spi_cr_header_event->cr_ed_byte[3]);
  2210. GPII_VERB(gpii_ptr, gpii_chan->chid, "code:0x%x\n", q2spi_cr_header_event->code);
  2211. GPII_VERB(gpii_ptr, gpii_chan->chid,
  2212. "cr_byte_0_len:0x%x cr_byte_0_err:0x%x type:0x%x ch_id:0x%x\n",
  2213. q2spi_cr_header_event->byte0_len, q2spi_cr_header_event->byte0_err,
  2214. q2spi_cr_header_event->type, q2spi_cr_header_event->ch_id);
  2215. msm_gpi_cb.cb_event = MSM_GPI_QUP_CR_HEADER;
  2216. msm_gpi_cb.q2spi_cr_header_event = *q2spi_cr_header_event;
  2217. GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "sending CB event:%s\n",
  2218. TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
  2219. client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
  2220. client_info->cb_param);
  2221. }
  2222. /* process all events */
  2223. static void gpi_process_events(struct gpii *gpii)
  2224. {
  2225. struct gpi_ring *ev_ring = gpii->ev_ring;
  2226. phys_addr_t cntxt_rp, local_rp;
  2227. void *rp;
  2228. union gpi_event *gpi_event;
  2229. struct gpii_chan *gpii_chan;
  2230. u32 chid, type;
  2231. cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
  2232. rp = to_virtual(ev_ring, cntxt_rp);
  2233. local_rp = to_physical(ev_ring, ev_ring->rp);
  2234. GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp:%pa local_rp:%pa rp:%pa ev_ring->rp:%pa\n",
  2235. &cntxt_rp, &local_rp, rp, ev_ring->rp);
  2236. do {
  2237. while (rp != ev_ring->rp) {
  2238. gpi_event = ev_ring->rp;
  2239. chid = gpi_event->xfer_compl_event.chid;
  2240. type = gpi_event->xfer_compl_event.type;
  2241. GPII_VERB(gpii, GPI_DBG_COMMON,
  2242. "chid:%u type:0x%x %08x %08x %08x %08x\n",
  2243. chid, type,
  2244. gpi_event->gpi_ere.dword[0],
  2245. gpi_event->gpi_ere.dword[1],
  2246. gpi_event->gpi_ere.dword[2],
  2247. gpi_event->gpi_ere.dword[3]);
  2248. if (chid >= MAX_CHANNELS_PER_GPII) {
  2249. GPII_ERR(gpii, GPI_DBG_COMMON,
  2250. "gpii channel:%d not valid\n", chid);
  2251. goto error_irq;
  2252. }
  2253. switch (type) {
  2254. case XFER_COMPLETE_EV_TYPE:
  2255. gpii_chan = &gpii->gpii_chan[chid];
  2256. gpi_process_xfer_compl_event(gpii_chan,
  2257. &gpi_event->xfer_compl_event);
  2258. break;
  2259. case STALE_EV_TYPE:
  2260. GPII_VERB(gpii, GPI_DBG_COMMON,
  2261. "stale event, not processing\n");
  2262. break;
  2263. case IMMEDIATE_DATA_EV_TYPE:
  2264. gpii_chan = &gpii->gpii_chan[chid];
  2265. gpi_process_imed_data_event(gpii_chan,
  2266. &gpi_event->immediate_data_event);
  2267. break;
  2268. case QUP_NOTIF_EV_TYPE:
  2269. gpii_chan = &gpii->gpii_chan[chid];
  2270. gpi_process_qup_notif_event(gpii_chan,
  2271. &gpi_event->qup_notif_event);
  2272. break;
  2273. case QUP_TCE_TYPE_Q2SPI_STATUS:
  2274. gpii_chan = &gpii->gpii_chan[chid];
  2275. gpi_process_qup_q2spi_status(gpii_chan, &gpi_event->q2spi_status);
  2276. break;
  2277. case QUP_TCE_TYPE_Q2SPI_CR_HEADER:
  2278. gpii_chan = &gpii->gpii_chan[chid];
  2279. gpi_process_xfer_q2spi_cr_header(gpii_chan,
  2280. &gpi_event->q2spi_cr_header_event);
  2281. break;
  2282. default:
  2283. GPII_VERB(gpii, GPI_DBG_COMMON,
  2284. "not supported event type:0x%x\n",
  2285. type);
  2286. }
  2287. gpi_ring_recycle_ev_element(ev_ring);
  2288. }
  2289. gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
  2290. /* clear pending IEOB events */
  2291. gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
  2292. cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
  2293. rp = to_virtual(ev_ring, cntxt_rp);
  2294. } while (rp != ev_ring->rp);
  2295. GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:%pa\n", &cntxt_rp);
  2296. return;
  2297. error_irq:
  2298. /* clear pending IEOB events */
  2299. gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
  2300. for (chid = 0, gpii_chan = gpii->gpii_chan;
  2301. chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
  2302. gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
  2303. (type << 8) | chid);
  2304. }
  2305. /* processing events using tasklet */
  2306. static void gpi_ev_tasklet(unsigned long data)
  2307. {
  2308. struct gpii *gpii = (struct gpii *)data;
  2309. GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
  2310. read_lock_bh(&gpii->pm_lock);
  2311. if (!REG_ACCESS_VALID(gpii->pm_state)) {
  2312. read_unlock_bh(&gpii->pm_lock);
  2313. GPII_ERR(gpii, GPI_DBG_COMMON,
  2314. "not processing any events, pm_state:%s\n",
  2315. TO_GPI_PM_STR(gpii->pm_state));
  2316. return;
  2317. }
  2318. /* process the events */
  2319. gpi_process_events(gpii);
  2320. /* enable IEOB, switching back to interrupts */
  2321. gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
  2322. read_unlock_bh(&gpii->pm_lock);
  2323. GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
  2324. }
  2325. /* marks all pending events for the channel as stale */
  2326. void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
  2327. {
  2328. struct gpii *gpii = gpii_chan->gpii;
  2329. struct gpi_ring *ev_ring = gpii->ev_ring;
  2330. void *ev_rp;
  2331. u32 cntxt_rp, local_rp;
  2332. GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
  2333. cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
  2334. ev_rp = ev_ring->rp;
  2335. local_rp = (u32)to_physical(ev_ring, ev_rp);
  2336. while (local_rp != cntxt_rp) {
  2337. union gpi_event *gpi_event = ev_rp;
  2338. u32 chid = gpi_event->xfer_compl_event.chid;
  2339. if (chid == gpii_chan->chid)
  2340. gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
  2341. ev_rp += ev_ring->el_size;
  2342. if (ev_rp >= (ev_ring->base + ev_ring->len))
  2343. ev_rp = ev_ring->base;
  2344. cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
  2345. local_rp = (u32)to_physical(ev_ring, ev_rp);
  2346. }
  2347. }
  2348. /* reset sw state and issue channel reset or de-alloc */
  2349. static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
  2350. {
  2351. struct gpii *gpii = gpii_chan->gpii;
  2352. struct gpi_ring *ch_ring = gpii_chan->ch_ring;
  2353. unsigned long flags;
  2354. LIST_HEAD(list);
  2355. int ret;
  2356. GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
  2357. ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
  2358. if (ret) {
  2359. GPII_ERR(gpii, gpii_chan->chid,
  2360. "Error with cmd:%s ret:%d\n",
  2361. TO_GPI_CMD_STR(gpi_cmd), ret);
  2362. return ret;
  2363. }
  2364. /* initialize the local ring ptrs */
  2365. ch_ring->rp = ch_ring->base;
  2366. ch_ring->wp = ch_ring->base;
  2367. /* visible to other cores */
  2368. smp_wmb();
  2369. /* check event ring for any stale events */
  2370. write_lock_irq(&gpii->pm_lock);
  2371. gpi_mark_stale_events(gpii_chan);
  2372. /* remove all async descriptors */
  2373. spin_lock_irqsave(&gpii_chan->vc.lock, flags);
  2374. vchan_get_all_descriptors(&gpii_chan->vc, &list);
  2375. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  2376. write_unlock_irq(&gpii->pm_lock);
  2377. vchan_dma_desc_free_list(&gpii_chan->vc, &list);
  2378. return 0;
  2379. }
  2380. static int gpi_start_chan(struct gpii_chan *gpii_chan)
  2381. {
  2382. struct gpii *gpii = gpii_chan->gpii;
  2383. int ret;
  2384. GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
  2385. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
  2386. if (ret) {
  2387. GPII_ERR(gpii, gpii_chan->chid,
  2388. "Error with cmd:%s ret:%d\n",
  2389. TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
  2390. return ret;
  2391. }
  2392. /* gpii CH is active now */
  2393. write_lock_irq(&gpii->pm_lock);
  2394. gpii_chan->pm_state = ACTIVE_STATE;
  2395. write_unlock_irq(&gpii->pm_lock);
  2396. return 0;
  2397. }
  2398. /* allocate and configure the transfer channel */
  2399. static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
  2400. {
  2401. struct gpii *gpii = gpii_chan->gpii;
  2402. struct gpi_ring *ring = gpii_chan->ch_ring;
  2403. int i;
  2404. int ret;
  2405. struct {
  2406. void *base;
  2407. int offset;
  2408. u32 val;
  2409. } ch_reg[] = {
  2410. {
  2411. gpii_chan->ch_cntxt_base_reg,
  2412. CNTXT_0_CONFIG,
  2413. GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
  2414. gpii_chan->dir,
  2415. GPI_CHTYPE_PROTO_GPI),
  2416. },
  2417. {
  2418. gpii_chan->ch_cntxt_base_reg,
  2419. CNTXT_1_R_LENGTH,
  2420. ring->len,
  2421. },
  2422. {
  2423. gpii_chan->ch_cntxt_base_reg,
  2424. CNTXT_2_RING_BASE_LSB,
  2425. (u32)ring->phys_addr,
  2426. },
  2427. {
  2428. gpii_chan->ch_cntxt_base_reg,
  2429. CNTXT_3_RING_BASE_MSB,
  2430. MSM_GPI_RING_PHYS_ADDR_UPPER(ring->phys_addr),
  2431. },
  2432. { /* program MSB of DB register with ring base */
  2433. gpii_chan->ch_cntxt_db_reg,
  2434. CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
  2435. MSM_GPI_RING_PHYS_ADDR_UPPER(ring->phys_addr),
  2436. },
  2437. {
  2438. gpii->regs,
  2439. GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
  2440. gpii_chan->chid),
  2441. GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
  2442. gpii_chan->init_config,
  2443. gpii_chan->protocol,
  2444. gpii_chan->seid),
  2445. },
  2446. {
  2447. gpii->regs,
  2448. GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
  2449. gpii_chan->chid),
  2450. 0,
  2451. },
  2452. {
  2453. gpii->regs,
  2454. GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
  2455. gpii_chan->chid),
  2456. 0,
  2457. },
  2458. {
  2459. gpii->regs,
  2460. GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
  2461. gpii_chan->chid),
  2462. 0,
  2463. },
  2464. {
  2465. gpii->regs,
  2466. GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
  2467. gpii_chan->chid),
  2468. 1,
  2469. },
  2470. { NULL },
  2471. };
  2472. GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
  2473. if (send_alloc_cmd) {
  2474. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
  2475. if (ret) {
  2476. GPII_ERR(gpii, gpii_chan->chid,
  2477. "Error with cmd:%s ret:%d\n",
  2478. TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
  2479. return ret;
  2480. }
  2481. }
  2482. /* program channel cntxt registers */
  2483. for (i = 0; ch_reg[i].base; i++)
  2484. gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
  2485. ch_reg[i].val);
  2486. /* flush all the writes */
  2487. wmb();
  2488. return 0;
  2489. }
  2490. /* allocate and configure event ring */
  2491. static int gpi_alloc_ev_chan(struct gpii *gpii)
  2492. {
  2493. struct gpi_ring *ring = gpii->ev_ring;
  2494. int i;
  2495. int ret;
  2496. struct {
  2497. void *base;
  2498. int offset;
  2499. u32 val;
  2500. } ev_reg[] = {
  2501. {
  2502. gpii->ev_cntxt_base_reg,
  2503. CNTXT_0_CONFIG,
  2504. GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
  2505. GPI_INTTYPE_IRQ,
  2506. GPI_CHTYPE_GPI_EV),
  2507. },
  2508. {
  2509. gpii->ev_cntxt_base_reg,
  2510. CNTXT_1_R_LENGTH,
  2511. ring->len,
  2512. },
  2513. {
  2514. gpii->ev_cntxt_base_reg,
  2515. CNTXT_2_RING_BASE_LSB,
  2516. (u32)ring->phys_addr,
  2517. },
  2518. {
  2519. gpii->ev_cntxt_base_reg,
  2520. CNTXT_3_RING_BASE_MSB,
  2521. MSM_GPI_RING_PHYS_ADDR_UPPER(ring->phys_addr),
  2522. },
  2523. {
2524. /* program msb of db register with ring base msb */
  2525. gpii->ev_cntxt_db_reg,
  2526. CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
  2527. MSM_GPI_RING_PHYS_ADDR_UPPER(ring->phys_addr),
  2528. },
  2529. {
  2530. gpii->ev_cntxt_base_reg,
  2531. CNTXT_8_RING_INT_MOD,
  2532. 0,
  2533. },
  2534. {
  2535. gpii->ev_cntxt_base_reg,
  2536. CNTXT_10_RING_MSI_LSB,
  2537. 0,
  2538. },
  2539. {
  2540. gpii->ev_cntxt_base_reg,
  2541. CNTXT_11_RING_MSI_MSB,
  2542. 0,
  2543. },
  2544. {
  2545. gpii->ev_cntxt_base_reg,
  2546. CNTXT_8_RING_INT_MOD,
  2547. 0,
  2548. },
  2549. {
  2550. gpii->ev_cntxt_base_reg,
  2551. CNTXT_12_RING_RP_UPDATE_LSB,
  2552. 0,
  2553. },
  2554. {
  2555. gpii->ev_cntxt_base_reg,
  2556. CNTXT_13_RING_RP_UPDATE_MSB,
  2557. 0,
  2558. },
  2559. { NULL },
  2560. };
  2561. GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
  2562. ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
  2563. if (ret) {
  2564. GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
  2565. TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
  2566. return ret;
  2567. }
  2568. /* program event context */
  2569. for (i = 0; ev_reg[i].base; i++)
  2570. gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
  2571. ev_reg[i].val);
  2572. /* add events to ring */
  2573. ring->wp = (ring->base + ring->len - ring->el_size);
  2574. /* flush all the writes */
  2575. wmb();
  2576. /* gpii is active now */
  2577. write_lock_irq(&gpii->pm_lock);
  2578. gpii->pm_state = ACTIVE_STATE;
  2579. write_unlock_irq(&gpii->pm_lock);
  2580. gpi_write_ev_db(gpii, ring, ring->wp);
  2581. return 0;
  2582. }
  2583. /* calculate # of ERE/TRE available to queue */
  2584. static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
  2585. {
  2586. int elements = 0;
  2587. if (ring->wp < ring->rp)
  2588. elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
  2589. else {
  2590. elements = (ring->rp - ring->base) / ring->el_size;
  2591. elements += ((ring->base + ring->len - ring->wp) /
  2592. ring->el_size) - 1;
  2593. }
  2594. return elements;
  2595. }
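/*
 * Worked example for the arithmetic above (numbers are illustrative
 * assumptions): a ring of 8 elements of 16 bytes (len = 128) with
 * rp = base + 32 and wp = base + 96 takes the else branch:
 *
 *	elements  = (rp - base) / el_size            = 32 / 16     = 2
 *	elements += (base + len - wp) / el_size - 1  = 32 / 16 - 1 = 1
 *
 * i.e. 3 free slots. One slot is always left unused so that wp == rp
 * unambiguously means the ring is empty.
 */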
  2596. static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
  2597. {
  2598. if (gpi_ring_num_elements_avail(ring) <= 0)
  2599. return -ENOMEM;
  2600. *wp = ring->wp;
  2601. ring->wp += ring->el_size;
  2602. if (ring->wp >= (ring->base + ring->len))
  2603. ring->wp = ring->base;
  2604. /* visible to other cores */
  2605. smp_wmb();
  2606. return 0;
  2607. }
  2608. static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
  2609. {
  2610. /* Update the WP */
  2611. ring->wp += ring->el_size;
  2612. if (ring->wp >= (ring->base + ring->len))
  2613. ring->wp = ring->base;
  2614. /* Update the RP */
  2615. ring->rp += ring->el_size;
  2616. if (ring->rp >= (ring->base + ring->len))
  2617. ring->rp = ring->base;
  2618. /* visible to other cores */
  2619. smp_wmb();
  2620. }
  2621. static void gpi_free_ring(struct gpi_ring *ring,
  2622. struct gpii *gpii)
  2623. {
  2624. dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
  2625. ring->pre_aligned, ring->dma_handle);
  2626. memset(ring, 0, sizeof(*ring));
  2627. }
  2628. /* allocate memory for transfer and event rings */
  2629. static int gpi_alloc_ring(struct gpi_ring *ring,
  2630. u32 elements,
  2631. u32 el_size,
  2632. struct gpii *gpii)
  2633. {
  2634. u64 len = elements * el_size;
  2635. int bit;
  2636. /* ring len must be power of 2 */
  2637. bit = find_last_bit((unsigned long *)&len, 32);
  2638. if (((1 << bit) - 1) & len)
  2639. bit++;
  2640. len = 1 << bit;
  2641. ring->alloc_size = (len + (len - 1));
  2642. GPII_INFO(gpii, GPI_DBG_COMMON,
  2643. "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
  2644. elements, el_size, (elements * el_size), len,
  2645. ring->alloc_size);
  2646. ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
  2647. ring->alloc_size,
  2648. &ring->dma_handle, GFP_KERNEL);
  2649. if (!ring->pre_aligned) {
  2650. GPII_CRITIC(gpii, GPI_DBG_COMMON,
  2651. "could not alloc size:%lu mem for ring\n",
  2652. ring->alloc_size);
  2653. return -ENOMEM;
  2654. }
  2655. /* align the physical mem */
  2656. ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
  2657. ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
  2658. ring->rp = ring->base;
  2659. ring->wp = ring->base;
  2660. ring->len = len;
  2661. ring->el_size = el_size;
  2662. ring->elements = ring->len / ring->el_size;
  2663. memset(ring->base, 0, ring->len);
  2664. ring->configured = true;
  2665. /* update to other cores */
  2666. smp_wmb();
  2667. GPII_INFO(gpii, GPI_DBG_COMMON,
  2668. "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
  2669. ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
  2670. ring->elements);
  2671. return 0;
  2672. }
  2673. /* copy tre into transfer ring */
  2674. static void gpi_queue_xfer(struct gpii *gpii,
  2675. struct gpii_chan *gpii_chan,
  2676. struct msm_gpi_tre *gpi_tre,
  2677. void **wp)
  2678. {
  2679. struct msm_gpi_tre *ch_tre;
  2680. int ret;
  2681. /* get next tre location we can copy */
  2682. ret = gpi_ring_add_element(gpii_chan->ch_ring, (void **)&ch_tre);
  2683. if (unlikely(ret)) {
  2684. GPII_CRITIC(gpii, gpii_chan->chid,
  2685. "Error adding ring element to xfer ring\n");
  2686. return;
  2687. }
  2688. /* copy the tre info */
  2689. memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
  2690. *wp = ch_tre;
  2691. }
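/*
 * gpi_terminate_all() below stops, resets, reprograms and restarts the
 * channels. For non-UART protocols both channels of the GPII are handled
 * as one group; for UART only the requesting channel is touched.
 */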
  2692. /* reset and restart transfer channel */
  2693. int gpi_terminate_all(struct dma_chan *chan)
  2694. {
  2695. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  2696. struct gpii *gpii = gpii_chan->gpii;
  2697. int schid, echid, i;
  2698. int ret = 0;
  2699. GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
  2700. mutex_lock(&gpii->ctrl_lock);
  2701. /*
2702. * treat both channels as a group unless the protocol is UART;
2703. * STOP, RESET, and START need to happen in lockstep
  2704. */
  2705. schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
  2706. echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
  2707. MAX_CHANNELS_PER_GPII;
  2708. /* stop the channel */
  2709. for (i = schid; i < echid; i++) {
  2710. gpii_chan = &gpii->gpii_chan[i];
  2711. /* disable ch state so no more TRE processing */
  2712. write_lock_irq(&gpii->pm_lock);
  2713. gpii_chan->pm_state = PREPARE_TERMINATE;
  2714. write_unlock_irq(&gpii->pm_lock);
  2715. /* send command to Stop the channel */
  2716. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
  2717. if (ret)
  2718. GPII_ERR(gpii, gpii_chan->chid,
  2719. "Error Stopping Chan:%d resetting\n", ret);
  2720. }
  2721. /* reset the channels (clears any pending tre) */
  2722. for (i = schid; i < echid; i++) {
  2723. gpii_chan = &gpii->gpii_chan[i];
  2724. ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
  2725. if (ret) {
  2726. GPII_ERR(gpii, gpii_chan->chid,
  2727. "Error resetting channel ret:%d\n", ret);
  2728. if (!gpii->reg_table_dump) {
  2729. gpi_dump_debug_reg(gpii);
  2730. gpii->reg_table_dump = true;
  2731. }
  2732. goto terminate_exit;
  2733. }
  2734. /* reprogram channel CNTXT */
  2735. ret = gpi_alloc_chan(gpii_chan, false);
  2736. if (ret) {
  2737. GPII_ERR(gpii, gpii_chan->chid,
  2738. "Error alloc_channel ret:%d\n", ret);
  2739. goto terminate_exit;
  2740. }
  2741. }
  2742. /* restart the channels */
  2743. for (i = schid; i < echid; i++) {
  2744. gpii_chan = &gpii->gpii_chan[i];
  2745. ret = gpi_start_chan(gpii_chan);
  2746. if (ret) {
  2747. GPII_ERR(gpii, gpii_chan->chid,
  2748. "Error Starting Channel ret:%d\n", ret);
  2749. goto terminate_exit;
  2750. }
  2751. }
  2752. terminate_exit:
  2753. mutex_unlock(&gpii->ctrl_lock);
  2754. return ret;
  2755. }
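/*
 * gpi_noop_tre() walks the transfer ring from the current read pointer to
 * the write pointer and rewrites the control word of every pending TRE so
 * it becomes a NOOP. When the channel is restarted the hardware then
 * drains these slots without re-executing the stale transfers.
 */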
  2756. static void gpi_noop_tre(struct gpii_chan *gpii_chan)
  2757. {
  2758. struct gpii *gpii = gpii_chan->gpii;
  2759. struct gpi_ring *ch_ring = gpii_chan->ch_ring;
  2760. phys_addr_t local_rp, local_wp;
  2761. void *cntxt_rp;
  2762. u32 noop_mask, noop_tre;
  2763. struct msm_gpi_tre *tre;
  2764. GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
  2765. local_rp = to_physical(ch_ring, ch_ring->rp);
  2766. local_wp = to_physical(ch_ring, ch_ring->wp);
  2767. cntxt_rp = ch_ring->rp;
  2768. GPII_INFO(gpii, gpii_chan->chid,
  2769. "local_rp:0x%0llx local_wp:0x%0llx\n", local_rp, local_wp);
  2770. noop_mask = NOOP_TRE_MASK(1, 0, 0, 0, 1);
  2771. noop_tre = NOOP_TRE;
  2772. while (local_rp != local_wp) {
  2773. /* dump the channel ring at the time of error */
  2774. tre = (struct msm_gpi_tre *)cntxt_rp;
2775. GPII_ERR(gpii, gpii_chan->chid, "local_rp:0x%0llx TRE: %08x %08x %08x %08x\n",
  2776. local_rp, tre->dword[0], tre->dword[1],
  2777. tre->dword[2], tre->dword[3]);
  2778. tre->dword[3] &= noop_mask;
  2779. tre->dword[3] |= noop_tre;
  2780. local_rp += ch_ring->el_size;
  2781. cntxt_rp += ch_ring->el_size;
  2782. if (cntxt_rp >= (ch_ring->base + ch_ring->len)) {
  2783. cntxt_rp = ch_ring->base;
  2784. local_rp = to_physical(ch_ring, ch_ring->base);
  2785. }
  2786. GPII_INFO(gpii, gpii_chan->chid,
  2787. "local_rp:0x%0llx\n", local_rp);
  2788. }
  2789. GPII_INFO(gpii, gpii_chan->chid, "exit\n");
  2790. }
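/*
 * Pause sequence used below: dump the GPII IRQ and event ring state for
 * debugging, stop both channels, then either tear the rings down for deep
 * sleep (reset, de-allocate channels and the event ring) or NOOP the
 * pending TREs and restart the channels, wait for any in-flight ISR work
 * to finish, and finally disable the interrupt line.
 */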
  2791. /* pause dma transfer for all channels */
  2792. static int gpi_pause(struct dma_chan *chan)
  2793. {
  2794. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  2795. struct gpii *gpii = gpii_chan->gpii;
  2796. int i, ret, idx = 0;
  2797. u32 offset1, offset2, type1, type2;
  2798. struct gpi_ring *ev_ring = gpii->ev_ring;
  2799. struct msm_gpi_ctrl *gpi_ctrl = chan->private;
  2800. phys_addr_t cntxt_rp, local_rp;
  2801. void *rp, *rp1;
  2802. union gpi_event *gpi_event;
  2803. u32 chid, type;
  2804. int iter = 0;
2805. unsigned long total_iter = 1000; /* wait up to 10 ms: 1000 iterations of udelay(10) */
  2806. GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
  2807. mutex_lock(&gpii->ctrl_lock);
2808. /* if gpi_pause was already done, do not do it again */
  2809. if (!gpii->is_resumed) {
  2810. GPII_ERR(gpii, gpii_chan->chid, "Already in suspend/pause state\n");
  2811. mutex_unlock(&gpii->ctrl_lock);
  2812. return 0;
  2813. }
  2814. /* dump the GPII IRQ register at the time of error */
  2815. offset1 = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
  2816. offset2 = GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id);
  2817. while (idx++ < 3) {
  2818. type1 = gpi_read_reg(gpii, gpii->regs + offset1);
  2819. type2 = gpi_read_reg(gpii, gpii->regs + offset2);
  2820. GPII_ERR(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x IEOB_MASK_OFF:0x%08x\n",
  2821. type1, type2);
  2822. }
  2823. cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
  2824. if (!cntxt_rp) {
  2825. GPII_ERR(gpii, GPI_DBG_COMMON, "invalid cntxt_rp");
  2826. mutex_unlock(&gpii->ctrl_lock);
  2827. return -EINVAL;
  2828. }
  2829. rp = to_virtual(ev_ring, cntxt_rp);
  2830. local_rp = to_physical(ev_ring, ev_ring->rp);
  2831. if (!local_rp) {
  2832. GPII_ERR(gpii, GPI_DBG_COMMON, "invalid local_rp");
  2833. mutex_unlock(&gpii->ctrl_lock);
  2834. return -EINVAL;
  2835. }
  2836. rp1 = ev_ring->rp;
  2837. /* dump the event ring at the time of error */
  2838. GPII_ERR(gpii, GPI_DBG_COMMON, "cntxt_rp:%pa local_rp:%pa\n",
  2839. &cntxt_rp, &local_rp);
  2840. while (rp != rp1) {
  2841. gpi_event = rp1;
  2842. chid = gpi_event->xfer_compl_event.chid;
  2843. type = gpi_event->xfer_compl_event.type;
  2844. GPII_ERR(gpii, GPI_DBG_COMMON,
  2845. "chid:%u type:0x%x %08x %08x %08x %08x rp:%pK rp1:%pK\n", chid, type,
  2846. gpi_event->gpi_ere.dword[0], gpi_event->gpi_ere.dword[1],
  2847. gpi_event->gpi_ere.dword[2], gpi_event->gpi_ere.dword[3], rp, rp1);
  2848. rp1 += ev_ring->el_size;
  2849. if (rp1 >= (ev_ring->base + ev_ring->len))
  2850. rp1 = ev_ring->base;
  2851. }
  2852. /* send stop command to stop the channels */
  2853. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  2854. gpii_chan = &gpii->gpii_chan[i];
  2855. /* disable ch state so no more TRE processing */
  2856. write_lock_irq(&gpii->pm_lock);
  2857. gpii_chan->pm_state = PREPARE_TERMINATE;
  2858. write_unlock_irq(&gpii->pm_lock);
  2859. /* send command to Stop the channel */
  2860. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
  2861. if (ret) {
  2862. GPII_ERR(gpii, gpii->gpii_chan[i].chid,
  2863. "Error stopping chan, ret:%d\n", ret);
  2864. mutex_unlock(&gpii->ctrl_lock);
  2865. return -ECONNRESET;
  2866. }
  2867. }
  2868. if (gpi_ctrl->cmd == MSM_GPI_DEEP_SLEEP_INIT) {
  2869. GPII_INFO(gpii, gpii_chan->chid, "deep sleep config\n");
  2870. /* Resetting the channels */
  2871. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  2872. gpii_chan = &gpii->gpii_chan[i];
  2873. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
  2874. if (ret) {
  2875. GPII_ERR(gpii, gpii->gpii_chan[i].chid,
  2876. "Error resetting chan, ret:%d\n", ret);
  2877. mutex_unlock(&gpii->ctrl_lock);
  2878. return -ECONNRESET;
  2879. }
  2880. }
  2881. /* Dealloc the channels */
  2882. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  2883. gpii_chan = &gpii->gpii_chan[i];
  2884. ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
  2885. if (ret) {
  2886. GPII_ERR(gpii, gpii->gpii_chan[i].chid,
  2887. "Error chan deallocating, ret:%d\n", ret);
  2888. mutex_unlock(&gpii->ctrl_lock);
  2889. return -ECONNRESET;
  2890. }
  2891. }
  2892. /* Dealloc Event Ring */
  2893. ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
  2894. if (ret) {
  2895. GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
  2896. TO_GPI_CMD_STR(GPI_EV_CMD_DEALLOC), ret);
  2897. mutex_unlock(&gpii->ctrl_lock);
  2898. return ret;
  2899. }
  2900. } else {
  2901. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  2902. gpii_chan = &gpii->gpii_chan[i];
  2903. gpi_noop_tre(gpii_chan);
  2904. }
  2905. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  2906. gpii_chan = &gpii->gpii_chan[i];
  2907. ret = gpi_start_chan(gpii_chan);
  2908. if (ret) {
  2909. GPII_ERR(gpii, gpii_chan->chid,
  2910. "Error Starting Channel ret:%d\n", ret);
  2911. mutex_unlock(&gpii->ctrl_lock);
  2912. return -ECONNRESET;
  2913. }
  2914. }
  2915. }
  2916. if (gpii->dual_ee_sync_flag) {
  2917. while (iter < total_iter) {
  2918. iter++;
  2919. /* Ensure ISR completed, so no more activity from GSI pending */
  2920. if (!gpii->dual_ee_sync_flag)
  2921. break;
  2922. udelay(10);
  2923. }
  2924. }
  2925. GPII_INFO(gpii, gpii_chan->chid, "iter:%d\n", iter);
  2926. disable_irq(gpii->irq);
  2927. gpii->is_resumed = false;
  2928. mutex_unlock(&gpii->ctrl_lock);
  2929. return 0;
  2930. }
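/*
 * Resume sequence used below: re-enable the interrupt first (commands to
 * the GSI hardware need it), optionally redo the probe-like configuration
 * after deep sleep, then issue START to both channels and mark the GPII
 * active again.
 */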
  2931. /* resume dma transfer */
  2932. static int gpi_resume(struct dma_chan *chan)
  2933. {
  2934. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  2935. struct gpii *gpii = gpii_chan->gpii;
  2936. struct msm_gpi_ctrl *gpi_ctrl = chan->private;
  2937. int i;
  2938. int ret;
  2939. GPII_INFO(gpii, gpii_chan->chid, "enter\n");
  2940. mutex_lock(&gpii->ctrl_lock);
2941. /* if gpi_pause was not done, there is nothing to resume */
  2942. if (gpii->is_resumed) {
  2943. GPII_ERR(gpii, gpii_chan->chid, "Already resumed\n");
  2944. mutex_unlock(&gpii->ctrl_lock);
  2945. return 0;
  2946. }
  2947. enable_irq(gpii->irq);
2948. /* We update the is_resumed flag here, in the middle of the function,
2949. * because enable_irq() must be called exactly once; otherwise the irq
2950. * enable/disable counts become unbalanced. Without enable_irq() we
2951. * cannot issue commands to the GSI hardware.
  2952. */
  2953. gpii->is_resumed = true;
2954. /* For deep sleep, restore the configuration the same way probe does */
  2955. if (gpi_ctrl->cmd == MSM_GPI_DEEP_SLEEP_INIT) {
  2956. GPII_INFO(gpii, gpii_chan->chid, "deep sleep config\n");
  2957. ret = gpi_deep_sleep_exit_config(chan, NULL);
  2958. if (ret) {
  2959. GPII_ERR(gpii, gpii_chan->chid,
  2960. "Err deep sleep config, ret:%d\n", ret);
  2961. mutex_unlock(&gpii->ctrl_lock);
  2962. return ret;
  2963. }
  2964. }
  2965. if (gpii->pm_state == ACTIVE_STATE) {
  2966. GPII_INFO(gpii, gpii_chan->chid,
  2967. "channel is already active\n");
  2968. mutex_unlock(&gpii->ctrl_lock);
  2969. return 0;
  2970. }
  2971. /* send start command to start the channels */
  2972. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  2973. ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
  2974. if (ret) {
  2975. GPII_ERR(gpii, gpii->gpii_chan[i].chid,
  2976. "Erro starting chan, ret:%d\n", ret);
  2977. mutex_unlock(&gpii->ctrl_lock);
  2978. return ret;
  2979. }
  2980. }
  2981. write_lock_irq(&gpii->pm_lock);
  2982. gpii->pm_state = ACTIVE_STATE;
  2983. write_unlock_irq(&gpii->pm_lock);
  2984. mutex_unlock(&gpii->ctrl_lock);
  2985. return 0;
  2986. }
  2987. void gpi_desc_free(struct virt_dma_desc *vd)
  2988. {
  2989. struct gpi_desc *gpi_desc = to_gpi_desc(vd);
  2990. kfree(gpi_desc);
  2991. gpi_desc = NULL;
  2992. }
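/*
 * Minimal usage sketch (illustrative only, simplified from how client
 * drivers use the dmaengine API): the client packs struct msm_gpi_tre
 * entries into a scatterlist and submits them through the core, e.g.
 *
 *   sg_init_one(&sg, &tre, sizeof(tre));
 *   desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV, flags);
 *   if (desc) {
 *           dmaengine_submit(desc);
 *           dma_async_issue_pending(chan);
 *   }
 */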
2993. /* prepare a descriptor and copy TREs into the transfer ring */
  2994. struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
  2995. struct scatterlist *sgl,
  2996. unsigned int sg_len,
  2997. enum dma_transfer_direction direction,
  2998. unsigned long flags,
  2999. void *context)
  3000. {
  3001. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  3002. struct gpii *gpii = gpii_chan->gpii;
  3003. u32 nr;
  3004. u32 nr_req = 0;
  3005. int i, j;
  3006. struct scatterlist *sg;
  3007. struct gpi_ring *ch_ring = gpii_chan->ch_ring;
  3008. void *tre, *wp = NULL;
  3009. const gfp_t gfp = GFP_ATOMIC;
  3010. struct gpi_desc *gpi_desc;
  3011. u32 tre_type;
  3012. gpii_chan->num_tre = sg_len;
  3013. GPII_VERB(gpii, gpii_chan->chid, "enter\n");
  3014. if (!is_slave_direction(direction)) {
  3015. GPII_ERR(gpii, gpii_chan->chid,
  3016. "invalid dma direction: %d\n", direction);
  3017. return NULL;
  3018. }
  3019. /* calculate # of elements required & available */
  3020. nr = gpi_ring_num_elements_avail(ch_ring);
  3021. for_each_sg(sgl, sg, sg_len, i) {
  3022. GPII_VERB(gpii, gpii_chan->chid,
  3023. "%d of %u len:%u\n", i, sg_len, sg->length);
  3024. nr_req += (sg->length / ch_ring->el_size);
  3025. }
  3026. GPII_VERB(gpii, gpii_chan->chid, "el avail:%u req:%u\n", nr, nr_req);
  3027. if (nr < nr_req) {
  3028. GPII_ERR(gpii, gpii_chan->chid,
  3029. "not enough space in ring, avail:%u required:%u\n",
  3030. nr, nr_req);
  3031. return NULL;
  3032. }
  3033. gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
  3034. if (!gpi_desc) {
  3035. GPII_ERR(gpii, gpii_chan->chid,
  3036. "out of memory for descriptor\n");
  3037. return NULL;
  3038. }
  3039. /* copy each tre into transfer ring */
  3040. for_each_sg(sgl, sg, sg_len, i) {
  3041. tre = sg_virt(sg);
  3042. if (sg_len == 1) {
  3043. tre_type =
  3044. MSM_GPI_TRE_TYPE(((struct msm_gpi_tre *)tre));
  3045. gpii_chan->lock_tre_set =
  3046. tre_type == MSM_GPI_TRE_LOCK ? (u32)true : (u32)false;
  3047. }
  3048. /* Check if last tre is an unlock tre */
  3049. if (i == sg_len - 1) {
  3050. tre_type =
  3051. MSM_GPI_TRE_TYPE(((struct msm_gpi_tre *)tre));
  3052. gpii->unlock_tre_set =
  3053. tre_type == MSM_GPI_TRE_UNLOCK ? (u32)true : (u32)false;
  3054. }
  3055. for (j = 0; j < sg->length;
  3056. j += ch_ring->el_size, tre += ch_ring->el_size)
  3057. gpi_queue_xfer(gpii, gpii_chan, tre, &wp);
  3058. }
  3059. /* set up the descriptor */
  3060. gpi_desc->db = ch_ring->wp;
  3061. gpi_desc->wp = wp;
  3062. gpi_desc->gpii_chan = gpii_chan;
  3063. GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
  3064. to_physical(ch_ring, ch_ring->wp),
  3065. to_physical(ch_ring, ch_ring->rp));
  3066. return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
  3067. }
3068. /* rings the transfer ring doorbell to begin the transfer */
  3069. static void gpi_issue_pending(struct dma_chan *chan)
  3070. {
  3071. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  3072. struct gpii *gpii = gpii_chan->gpii;
  3073. unsigned long flags, pm_lock_flags;
  3074. struct virt_dma_desc *vd = NULL;
  3075. struct gpi_desc *gpi_desc;
  3076. GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
  3077. read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
3078. /* move all submitted descriptors to the issued list */
  3079. spin_lock_irqsave(&gpii_chan->vc.lock, flags);
  3080. if (vchan_issue_pending(&gpii_chan->vc))
  3081. vd = list_last_entry(&gpii_chan->vc.desc_issued,
  3082. struct virt_dma_desc, node);
  3083. spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
  3084. /* nothing to do list is empty */
  3085. if (!vd) {
  3086. read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
  3087. GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
  3088. return;
  3089. }
  3090. gpi_desc = to_gpi_desc(vd);
  3091. gpi_write_ch_db(gpii_chan, gpii_chan->ch_ring, gpi_desc->db);
  3092. read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
  3093. }
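/*
 * After deep sleep the GSI hardware context is lost, so the helper below
 * rewinds the transfer and event rings to their base addresses and then
 * repeats the probe-time bring-up: configure interrupts, allocate the
 * event channel, and allocate and start both transfer channels.
 */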
  3094. static int gpi_deep_sleep_exit_config(struct dma_chan *chan,
  3095. struct dma_slave_config *config)
  3096. {
  3097. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  3098. struct gpii *gpii = gpii_chan->gpii;
  3099. struct gpi_ring *ring_ch = NULL;
  3100. struct gpi_ring *ring_ev = NULL;
  3101. int i = 0;
  3102. int ret = 0;
  3103. GPII_INFO(gpii, gpii_chan->chid, "enter\n");
3104. /* Reset the TX and RX channel rings to the base address */
  3105. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  3106. gpii_chan = &gpii->gpii_chan[i];
  3107. ring_ch = gpii_chan->ch_ring;
  3108. ring_ch->wp = ring_ch->base;
  3109. ring_ch->rp = ring_ch->base;
  3110. }
  3111. /* Reset Event ring to the base address */
  3112. ring_ev = gpii->ev_ring;
  3113. ring_ev->wp = ring_ev->base;
  3114. ring_ev->rp = ring_ev->base;
  3115. ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
  3116. if (ret) {
  3117. GPII_ERR(gpii, gpii_chan->chid,
  3118. "error config. interrupts, ret:%d\n", ret);
  3119. return ret;
  3120. }
  3121. /* allocate event rings */
  3122. ret = gpi_alloc_ev_chan(gpii);
  3123. if (ret) {
  3124. GPII_ERR(gpii, gpii_chan->chid,
  3125. "error alloc_ev_chan:%d\n", ret);
  3126. goto error_alloc_ev_ring;
  3127. }
  3128. /* Allocate all channels */
  3129. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  3130. ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
  3131. if (ret) {
  3132. GPII_ERR(gpii, gpii->gpii_chan[i].chid,
  3133. "Error allocating chan:%d\n", ret);
  3134. goto error_alloc_chan;
  3135. }
  3136. }
  3137. /* start channels */
  3138. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  3139. ret = gpi_start_chan(&gpii->gpii_chan[i]);
  3140. if (ret) {
  3141. GPII_ERR(gpii, gpii->gpii_chan[i].chid,
  3142. "Error start chan:%d\n", ret);
  3143. goto error_start_chan;
  3144. }
  3145. }
  3146. return ret;
  3147. error_start_chan:
3148. for (i = i - 1; i >= 0; i--) {
  3149. gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
  3150. gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
  3151. }
  3152. i = 2;
  3153. error_alloc_chan:
  3154. for (i = i - 1; i >= 0; i--)
  3155. gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
  3156. error_alloc_ev_ring:
  3157. gpi_disable_interrupts(gpii);
  3158. return ret;
  3159. }
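/*
 * For MSM_GPI_INIT the init only proceeds once both channels of the GPII
 * have been configured: the event ring is sized from the larger of the
 * two channels' TRE requests (scaled by ev_factor), interrupts are set
 * up, the event channel is allocated, and finally both transfer channels
 * are allocated and started. The UART commands are simple pass-throughs
 * to gpi_send_cmd().
 */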
  3160. /* configure or issue async command */
  3161. static int gpi_config(struct dma_chan *chan,
  3162. struct dma_slave_config *config)
  3163. {
  3164. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  3165. struct gpii *gpii = gpii_chan->gpii;
  3166. struct msm_gpi_ctrl *gpi_ctrl = chan->private;
  3167. const int ev_factor = gpii->gpi_dev->ev_factor;
  3168. u32 elements;
  3169. int i = 0;
  3170. int ret = 0;
  3171. GPII_INFO(gpii, gpii_chan->chid, "enter\n");
  3172. if (!gpi_ctrl) {
  3173. GPII_ERR(gpii, gpii_chan->chid,
  3174. "no config ctrl data provided");
  3175. return -EINVAL;
  3176. }
  3177. mutex_lock(&gpii->ctrl_lock);
  3178. switch (gpi_ctrl->cmd) {
  3179. case MSM_GPI_INIT:
  3180. GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
  3181. gpii_chan->client_info.callback = gpi_ctrl->init.callback;
  3182. gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
  3183. gpii_chan->pm_state = CONFIG_STATE;
  3184. /* check if both channels are configured before continue */
  3185. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
  3186. if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
  3187. goto exit_gpi_init;
  3188. /* configure to highest priority from two channels */
  3189. gpii->ev_priority = min(gpii->gpii_chan[0].priority,
  3190. gpii->gpii_chan[1].priority);
  3191. /* protocol must be same for both channels */
  3192. if (gpii->gpii_chan[0].protocol !=
  3193. gpii->gpii_chan[1].protocol) {
  3194. GPII_ERR(gpii, gpii_chan->chid,
  3195. "protocol did not match protocol %u != %u\n",
  3196. gpii->gpii_chan[0].protocol,
  3197. gpii->gpii_chan[1].protocol);
  3198. ret = -EINVAL;
  3199. goto exit_gpi_init;
  3200. }
  3201. gpii->protocol = gpii_chan->protocol;
  3202. /* allocate memory for event ring */
  3203. elements = max(gpii->gpii_chan[0].req_tres,
  3204. gpii->gpii_chan[1].req_tres);
  3205. ret = gpi_alloc_ring(gpii->ev_ring, elements << ev_factor,
  3206. sizeof(union gpi_event), gpii);
  3207. if (ret) {
  3208. GPII_ERR(gpii, gpii_chan->chid,
  3209. "error allocating mem for ev ring\n");
  3210. goto exit_gpi_init;
  3211. }
  3212. /* configure interrupts */
  3213. write_lock_irq(&gpii->pm_lock);
  3214. gpii->pm_state = PREPARE_HARDWARE;
  3215. write_unlock_irq(&gpii->pm_lock);
  3216. ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
  3217. if (ret) {
  3218. GPII_ERR(gpii, gpii_chan->chid,
  3219. "error config. interrupts, ret:%d\n", ret);
  3220. goto error_config_int;
  3221. }
  3222. /* allocate event rings */
  3223. ret = gpi_alloc_ev_chan(gpii);
  3224. if (ret) {
  3225. GPII_ERR(gpii, gpii_chan->chid,
  3226. "error alloc_ev_chan:%d\n", ret);
  3227. goto error_alloc_ev_ring;
  3228. }
  3229. /* Allocate all channels */
  3230. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  3231. ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
  3232. if (ret) {
  3233. GPII_ERR(gpii, gpii->gpii_chan[i].chid,
  3234. "Error allocating chan:%d\n", ret);
  3235. goto error_alloc_chan;
  3236. }
  3237. }
  3238. /* start channels */
  3239. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
  3240. ret = gpi_start_chan(&gpii->gpii_chan[i]);
  3241. if (ret) {
  3242. GPII_ERR(gpii, gpii->gpii_chan[i].chid,
  3243. "Error start chan:%d\n", ret);
  3244. goto error_start_chan;
  3245. }
  3246. }
  3247. break;
  3248. case MSM_GPI_CMD_UART_SW_STALE:
  3249. GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
  3250. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
  3251. break;
  3252. case MSM_GPI_CMD_UART_RFR_READY:
  3253. GPII_INFO(gpii, gpii_chan->chid,
  3254. "sending UART RFR READY cmd\n");
  3255. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
  3256. break;
  3257. case MSM_GPI_CMD_UART_RFR_NOT_READY:
  3258. GPII_INFO(gpii, gpii_chan->chid,
  3259. "sending UART RFR READY NOT READY cmd\n");
  3260. ret = gpi_send_cmd(gpii, gpii_chan,
  3261. GPI_CH_CMD_UART_RFR_NOT_READY);
  3262. break;
  3263. default:
  3264. GPII_ERR(gpii, gpii_chan->chid,
  3265. "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
  3266. ret = -EINVAL;
  3267. }
  3268. mutex_unlock(&gpii->ctrl_lock);
  3269. return ret;
  3270. error_start_chan:
3271. for (i = i - 1; i >= 0; i--) {
  3272. gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
  3273. gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
  3274. }
  3275. i = 2;
  3276. error_alloc_chan:
  3277. for (i = i - 1; i >= 0; i--)
  3278. gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
  3279. error_alloc_ev_ring:
  3280. gpi_disable_interrupts(gpii);
  3281. error_config_int:
  3282. gpi_free_ring(gpii->ev_ring, gpii);
  3283. exit_gpi_init:
  3284. mutex_unlock(&gpii->ctrl_lock);
  3285. return ret;
  3286. }
  3287. /* release all channel resources */
  3288. static void gpi_free_chan_resources(struct dma_chan *chan)
  3289. {
  3290. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  3291. struct gpii *gpii = gpii_chan->gpii;
  3292. enum gpi_pm_state cur_state;
  3293. int ret, i;
  3294. GPII_INFO(gpii, gpii_chan->chid, "enter\n");
  3295. mutex_lock(&gpii->ctrl_lock);
  3296. cur_state = gpii_chan->pm_state;
  3297. /* disable ch state so no more TRE processing for this channel */
  3298. write_lock_irq(&gpii->pm_lock);
  3299. gpii_chan->pm_state = PREPARE_TERMINATE;
  3300. write_unlock_irq(&gpii->pm_lock);
  3301. /* attempt to do graceful hardware shutdown */
  3302. if (cur_state == ACTIVE_STATE) {
  3303. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
  3304. if (ret)
  3305. GPII_ERR(gpii, gpii_chan->chid,
  3306. "error stopping channel:%d\n", ret);
  3307. ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
  3308. if (ret)
  3309. GPII_ERR(gpii, gpii_chan->chid,
  3310. "error resetting channel:%d\n", ret);
  3311. gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
  3312. }
  3313. /* free all allocated memory */
  3314. gpi_free_ring(gpii_chan->ch_ring, gpii);
  3315. vchan_free_chan_resources(&gpii_chan->vc);
  3316. write_lock_irq(&gpii->pm_lock);
  3317. gpii_chan->pm_state = DISABLE_STATE;
  3318. write_unlock_irq(&gpii->pm_lock);
  3319. /* if other rings are still active exit */
  3320. for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
  3321. if (gpii->gpii_chan[i].ch_ring->configured)
  3322. goto exit_free;
  3323. GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
  3324. /* deallocate EV Ring */
  3325. cur_state = gpii->pm_state;
  3326. write_lock_irq(&gpii->pm_lock);
  3327. gpii->pm_state = PREPARE_TERMINATE;
  3328. write_unlock_irq(&gpii->pm_lock);
  3329. /* wait for threads to complete out */
  3330. tasklet_kill(&gpii->ev_task);
  3331. /* send command to de allocate event ring */
  3332. if (cur_state == ACTIVE_STATE)
  3333. gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
  3334. gpi_free_ring(gpii->ev_ring, gpii);
  3335. /* disable interrupts */
  3336. if (cur_state == ACTIVE_STATE)
  3337. gpi_disable_interrupts(gpii);
  3338. /* set final state to disable */
  3339. write_lock_irq(&gpii->pm_lock);
  3340. gpii->pm_state = DISABLE_STATE;
  3341. write_unlock_irq(&gpii->pm_lock);
  3342. exit_free:
  3343. mutex_unlock(&gpii->ctrl_lock);
  3344. }
  3345. /* allocate channel resources */
  3346. static int gpi_alloc_chan_resources(struct dma_chan *chan)
  3347. {
  3348. struct gpii_chan *gpii_chan = to_gpii_chan(chan);
  3349. struct gpii *gpii = gpii_chan->gpii;
  3350. int ret;
  3351. GPII_INFO(gpii, gpii_chan->chid, "enter\n");
  3352. mutex_lock(&gpii->ctrl_lock);
  3353. /* allocate memory for transfer ring */
  3354. ret = gpi_alloc_ring(gpii_chan->ch_ring, gpii_chan->req_tres,
  3355. sizeof(struct msm_gpi_tre), gpii);
  3356. if (ret) {
  3357. GPII_ERR(gpii, gpii_chan->chid,
  3358. "error allocating xfer ring, ret:%d\n", ret);
  3359. goto xfer_alloc_err;
  3360. }
  3361. mutex_unlock(&gpii->ctrl_lock);
  3362. return 0;
  3363. xfer_alloc_err:
  3364. mutex_unlock(&gpii->ctrl_lock);
  3365. return ret;
  3366. }
  3367. static int gpi_find_static_gpii(struct gpi_dev *gpi_dev, int gpii_no)
  3368. {
  3369. if ((gpi_dev->static_gpii_mask) & (1<<gpii_no))
  3370. return gpii_no;
  3371. return -EIO;
  3372. }
  3373. static int gpi_find_dynamic_gpii(struct gpi_dev *gpi_dev, u32 seid)
  3374. {
  3375. int gpii;
  3376. struct gpii_chan *tx_chan, *rx_chan;
  3377. u32 gpii_mask = gpi_dev->gpii_mask;
  3378. /* check if same seid is already configured for another chid */
  3379. for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
  3380. if (!((1 << gpii) & gpii_mask))
  3381. continue;
  3382. tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
  3383. rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
  3384. if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
  3385. return gpii;
  3386. if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
  3387. return gpii;
  3388. }
  3389. /* no channels configured with same seid, return next avail gpii */
  3390. for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
  3391. if (!((1 << gpii) & gpii_mask))
  3392. continue;
  3393. tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
  3394. rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
  3395. /* check if gpii is configured */
  3396. if (tx_chan->vc.chan.client_count ||
  3397. rx_chan->vc.chan.client_count)
  3398. continue;
  3399. /* found a free gpii */
  3400. return gpii;
  3401. }
  3402. /* no gpii instance available to use */
  3403. return -EIO;
  3404. }
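/*
 * The client's dmas specifier carries at least REQ_OF_DMA_ARGS cells:
 * args[0] is the channel id within the GPII, args[1] the serial engine
 * id, args[2] the protocol, args[3] the requested number of TREs, and
 * args[4] packs the event ring priority plus an optional static GPII
 * index. A GPII is reused if another channel with the same SE id already
 * owns it; otherwise the next free one is picked.
 */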
  3405. /* gpi_of_dma_xlate: open client requested channel */
  3406. static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
  3407. struct of_dma *of_dma)
  3408. {
  3409. struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
  3410. u32 seid, chid;
  3411. int gpii, static_gpii_no;
  3412. struct gpii_chan *gpii_chan;
  3413. if (args->args_count < REQ_OF_DMA_ARGS) {
  3414. GPI_ERR(gpi_dev,
  3415. "gpii require minimum 6 args, client passed:%d args\n",
  3416. args->args_count);
  3417. return NULL;
  3418. }
  3419. chid = args->args[0];
  3420. if (chid >= MAX_CHANNELS_PER_GPII) {
  3421. GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
  3422. return NULL;
  3423. }
  3424. seid = args->args[1];
  3425. static_gpii_no = (args->args[4] & STATIC_GPII_BMSK) >> STATIC_GPII_SHFT;
  3426. if (static_gpii_no)
  3427. gpii = gpi_find_static_gpii(gpi_dev, static_gpii_no-1);
  3428. else /* find next available gpii to use */
  3429. gpii = gpi_find_dynamic_gpii(gpi_dev, seid);
  3430. if (gpii < 0) {
  3431. GPI_ERR(gpi_dev, "no available gpii instances\n");
  3432. return NULL;
  3433. }
  3434. gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
  3435. if (gpii_chan->vc.chan.client_count) {
  3436. GPI_ERR(gpi_dev, "gpii:%d chid:%d seid:%d already configured\n",
  3437. gpii, chid, gpii_chan->seid);
  3438. return NULL;
  3439. }
  3440. /* get ring size, protocol, se_id, and priority */
  3441. gpii_chan->seid = seid;
  3442. gpii_chan->protocol = args->args[2];
  3443. if (gpii_chan->protocol == SE_PROTOCOL_Q2SPI)
  3444. gpii_chan->init_config = 1;
  3445. gpii_chan->req_tres = args->args[3];
  3446. gpii_chan->priority = args->args[4] & GPI_EV_PRIORITY_BMSK;
  3447. GPI_LOG(gpi_dev,
  3448. "client req gpii:%u chid:%u #_tre:%u prio:%u proto:%u SE:%d init_config:%d\n",
  3449. gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
  3450. gpii_chan->protocol, gpii_chan->seid, gpii_chan->init_config);
  3451. return dma_get_slave_channel(&gpii_chan->vc.chan);
  3452. }
  3453. /* gpi_setup_debug - setup debug capabilities */
  3454. static void gpi_setup_debug(struct gpi_dev *gpi_dev)
  3455. {
  3456. char node_name[GPI_LABEL_SIZE];
  3457. const umode_t mode = 0600;
  3458. int i;
  3459. snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
  3460. (u64)gpi_dev->res->start);
  3461. gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
  3462. node_name, 0);
  3463. gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
  3464. if (!IS_ERR_OR_NULL(pdentry)) {
  3465. snprintf(node_name, sizeof(node_name), "%llx",
  3466. (u64)gpi_dev->res->start);
  3467. gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
  3468. if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
  3469. debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
  3470. &gpi_dev->ipc_log_lvl);
  3471. debugfs_create_u32("klog_lvl", mode,
  3472. gpi_dev->dentry, &gpi_dev->klog_lvl);
  3473. }
  3474. }
  3475. for (i = 0; i < gpi_dev->max_gpii; i++) {
  3476. struct gpii *gpii;
  3477. if (!(((1 << i) & gpi_dev->gpii_mask) ||
  3478. ((1 << i) & gpi_dev->static_gpii_mask)))
  3479. continue;
  3480. gpii = &gpi_dev->gpiis[i];
  3481. snprintf(gpii->label, sizeof(gpii->label),
  3482. "%s%llx_gpii%d",
  3483. GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
  3484. gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
  3485. gpii->label, 0);
  3486. gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
  3487. gpii->klog_lvl = DEFAULT_KLOG_LVL;
  3488. if (IS_ERR_OR_NULL(gpi_dev->dentry))
  3489. continue;
  3490. snprintf(node_name, sizeof(node_name), "gpii%d", i);
  3491. gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
  3492. if (IS_ERR_OR_NULL(gpii->dentry))
  3493. continue;
  3494. debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
  3495. &gpii->ipc_log_lvl);
  3496. debugfs_create_u32("klog_lvl", mode, gpii->dentry,
  3497. &gpii->klog_lvl);
  3498. }
  3499. }
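/*
 * Probe flow: map the "gpi-top" register space, read the qcom,max-num-gpii,
 * qcom,gpii-mask and qcom,ev-factor properties, allocate per-GPII state and
 * rings, record the context/doorbell register offsets and IRQ for every
 * enabled GPII, and then register the device with the dmaengine framework
 * and the OF DMA translator.
 */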
  3500. static int gpi_probe(struct platform_device *pdev)
  3501. {
  3502. struct gpi_dev *gpi_dev;
  3503. int ret, i;
  3504. u32 gpi_ee_offset;
  3505. gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
  3506. if (!gpi_dev)
  3507. return -ENOMEM;
  3508. /* debug purpose */
  3509. gpi_dev_dbg[arr_idx++] = gpi_dev;
  3510. gpi_dev->dev = &pdev->dev;
  3511. gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
  3512. gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  3513. "gpi-top");
  3514. if (!gpi_dev->res) {
  3515. GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
  3516. return -EINVAL;
  3517. }
  3518. gpi_dev->regs = devm_ioremap(gpi_dev->dev, gpi_dev->res->start,
  3519. resource_size(gpi_dev->res));
  3520. if (!gpi_dev->regs) {
  3521. GPI_ERR(gpi_dev, "IO remap failed\n");
  3522. return -EFAULT;
  3523. }
  3524. gpi_dev->ee_base = gpi_dev->regs;
  3525. ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
  3526. &gpi_dev->max_gpii);
  3527. if (ret) {
  3528. GPI_ERR(gpi_dev, "missing 'max-no-gpii' DT node\n");
  3529. return ret;
  3530. }
  3531. ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
  3532. &gpi_dev->gpii_mask);
  3533. if (ret) {
  3534. GPI_ERR(gpi_dev, "missing 'gpii-mask' DT node\n");
  3535. return ret;
  3536. }
  3537. ret = of_property_read_u32(gpi_dev->dev->of_node,
  3538. "qcom,static-gpii-mask", &gpi_dev->static_gpii_mask);
  3539. if (!ret)
  3540. GPI_LOG(gpi_dev, "static GPII usecase\n");
  3541. ret = of_property_read_u32(gpi_dev->dev->of_node,
  3542. "qcom,gpi-ee-offset", &gpi_ee_offset);
  3543. if (ret)
  3544. GPI_LOG(gpi_dev, "No variable ee offset present\n");
  3545. else
  3546. gpi_dev->ee_base =
  3547. (void *)((u64)gpi_dev->ee_base - gpi_ee_offset);
  3548. ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
  3549. &gpi_dev->ev_factor);
  3550. if (ret) {
  3551. GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
  3552. return ret;
  3553. }
  3554. ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
  3555. if (ret) {
  3556. GPI_ERR(gpi_dev,
  3557. "Error setting dma_mask to 64, ret:%d\n", ret);
  3558. return ret;
  3559. }
  3560. gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
  3561. sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
  3562. GFP_KERNEL);
  3563. if (!gpi_dev->gpiis)
  3564. return -ENOMEM;
  3565. gpi_dev->is_le_vm = of_property_read_bool(pdev->dev.of_node, "qcom,le-vm");
  3566. if (gpi_dev->is_le_vm)
  3567. GPI_LOG(gpi_dev, "LE-VM usecase\n");
  3568. /* setup all the supported gpii */
  3569. INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
  3570. for (i = 0; i < gpi_dev->max_gpii; i++) {
  3571. struct gpii *gpii = &gpi_dev->gpiis[i];
  3572. int chan;
  3573. gpii->is_resumed = true;
  3574. gpii->gpii_chan[0].ch_ring = dmam_alloc_coherent(gpi_dev->dev,
  3575. sizeof(struct gpi_ring),
  3576. &gpii->gpii_chan[0].gpii_chan_dma,
  3577. GFP_KERNEL);
  3578. if (!gpii->gpii_chan[0].ch_ring) {
  3579. GPI_LOG(gpi_dev, "could not allocate for gpii->gpii_chan[0].ch_ring\n");
  3580. return -ENOMEM;
  3581. }
  3582. gpii->gpii_chan[1].ch_ring = dmam_alloc_coherent(gpi_dev->dev,
  3583. sizeof(struct gpi_ring),
  3584. &gpii->gpii_chan[1].gpii_chan_dma,
  3585. GFP_KERNEL);
  3586. if (!gpii->gpii_chan[1].ch_ring) {
  3587. GPI_LOG(gpi_dev, "could not allocate for gpii->gpii_chan[1].ch_ring\n");
  3588. return -ENOMEM;
  3589. }
  3590. gpii->ev_ring = dmam_alloc_coherent(gpi_dev->dev,
  3591. sizeof(struct gpi_ring),
  3592. &gpii->event_dma_addr, GFP_KERNEL);
  3593. if (!gpii->ev_ring) {
  3594. GPI_LOG(gpi_dev, "could not allocate for gpii->ev_ring\n");
  3595. return -ENOMEM;
  3596. }
  3597. if (!(((1 << i) & gpi_dev->gpii_mask) ||
  3598. ((1 << i) & gpi_dev->static_gpii_mask)))
  3599. continue;
  3600. /* set up ev cntxt register map */
  3601. gpii->ev_cntxt_base_reg = gpi_dev->ee_base +
  3602. GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
  3603. gpii->ev_cntxt_db_reg = gpi_dev->ee_base +
  3604. GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
  3605. gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
  3606. CNTXT_2_RING_BASE_LSB;
  3607. gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
  3608. CNTXT_4_RING_RP_LSB;
  3609. gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
  3610. CNTXT_6_RING_WP_LSB;
  3611. gpii->ev_cmd_reg = gpi_dev->ee_base +
  3612. GPI_GPII_n_EV_CH_CMD_OFFS(i);
  3613. gpii->ieob_src_reg = gpi_dev->ee_base +
  3614. GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
  3615. gpii->ieob_clr_reg = gpi_dev->ee_base +
  3616. GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
  3617. /* set up irq */
  3618. ret = platform_get_irq(pdev, i);
  3619. if (ret < 0) {
  3620. GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d",
  3621. i, ret);
  3622. return ret;
  3623. }
  3624. gpii->irq = ret;
  3625. /* set up channel specific register info */
  3626. for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
  3627. struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
  3628. /* set up ch cntxt register map */
  3629. gpii_chan->ch_cntxt_base_reg = gpi_dev->ee_base +
  3630. GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
  3631. gpii_chan->ch_cntxt_db_reg = gpi_dev->ee_base +
  3632. GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
  3633. gpii_chan->ch_ring_base_lsb_reg =
  3634. gpii_chan->ch_cntxt_base_reg +
  3635. CNTXT_2_RING_BASE_LSB;
  3636. gpii_chan->ch_ring_rp_lsb_reg =
  3637. gpii_chan->ch_cntxt_base_reg +
  3638. CNTXT_4_RING_RP_LSB;
  3639. gpii_chan->ch_ring_wp_lsb_reg =
  3640. gpii_chan->ch_cntxt_base_reg +
  3641. CNTXT_6_RING_WP_LSB;
  3642. gpii_chan->ch_cmd_reg = gpi_dev->ee_base +
  3643. GPI_GPII_n_CH_CMD_OFFS(i);
  3644. /* vchan setup */
  3645. vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
  3646. gpii_chan->vc.desc_free = gpi_desc_free;
  3647. gpii_chan->chid = chan;
  3648. gpii_chan->gpii = gpii;
  3649. gpii_chan->dir = GPII_CHAN_DIR[chan];
  3650. }
  3651. mutex_init(&gpii->ctrl_lock);
  3652. rwlock_init(&gpii->pm_lock);
  3653. tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
  3654. (unsigned long)gpii);
  3655. init_completion(&gpii->cmd_completion);
  3656. gpii->gpii_id = i;
  3657. gpii->regs = gpi_dev->ee_base;
  3658. gpii->gpi_dev = gpi_dev;
  3659. atomic_set(&gpii->dbg_index, 0);
  3660. }
  3661. platform_set_drvdata(pdev, gpi_dev);
3662. /* clear and set DMA capabilities */
  3663. dma_cap_zero(gpi_dev->dma_device.cap_mask);
  3664. dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
  3665. /* configure dmaengine apis */
  3666. gpi_dev->dma_device.directions =
  3667. BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
  3668. gpi_dev->dma_device.residue_granularity =
  3669. DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
  3670. gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
  3671. gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
  3672. gpi_dev->dma_device.device_alloc_chan_resources =
  3673. gpi_alloc_chan_resources;
  3674. gpi_dev->dma_device.device_free_chan_resources =
  3675. gpi_free_chan_resources;
  3676. gpi_dev->dma_device.device_tx_status = dma_cookie_status;
  3677. gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
  3678. gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
  3679. gpi_dev->dma_device.device_config = gpi_config;
  3680. gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
  3681. gpi_dev->dma_device.dev = gpi_dev->dev;
  3682. gpi_dev->dma_device.device_pause = gpi_pause;
  3683. gpi_dev->dma_device.device_resume = gpi_resume;
  3684. /* register with dmaengine framework */
  3685. ret = dma_async_device_register(&gpi_dev->dma_device);
  3686. if (ret) {
  3687. GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
  3688. return ret;
  3689. }
  3690. ret = of_dma_controller_register(gpi_dev->dev->of_node,
  3691. gpi_of_dma_xlate, gpi_dev);
  3692. if (ret) {
  3693. GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
  3694. return ret;
  3695. }
  3696. /* setup debug capabilities */
  3697. gpi_setup_debug(gpi_dev);
  3698. GPI_LOG(gpi_dev, "probe success\n");
  3699. return ret;
  3700. }
  3701. static int gpi_remove(struct platform_device *pdev)
  3702. {
  3703. struct gpi_dev *gpi_dev = platform_get_drvdata(pdev);
  3704. int i;
  3705. of_dma_controller_free(gpi_dev->dev->of_node);
  3706. dma_async_device_unregister(&gpi_dev->dma_device);
  3707. for (i = 0; i < gpi_dev->max_gpii; i++) {
  3708. struct gpii *gpii = &gpi_dev->gpiis[i];
  3709. int chan;
  3710. if (!(((1 << i) & gpi_dev->gpii_mask) ||
  3711. ((1 << i) & gpi_dev->static_gpii_mask)))
  3712. continue;
  3713. for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
  3714. struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
  3715. gpi_free_chan_resources(&gpii_chan->vc.chan);
  3716. }
  3717. if (gpii->ilctxt)
  3718. ipc_log_context_destroy(gpii->ilctxt);
  3719. }
  3720. for (i = 0; i < arr_idx; i++)
  3721. gpi_dev_dbg[i] = NULL;
  3722. arr_idx = 0;
  3723. if (gpi_dev->ilctxt)
  3724. ipc_log_context_destroy(gpi_dev->ilctxt);
  3725. debugfs_remove(pdentry);
  3726. return 0;
  3727. }
  3728. static const struct of_device_id gpi_of_match[] = {
  3729. { .compatible = "qcom,gpi-dma" },
  3730. {}
  3731. };
  3732. MODULE_DEVICE_TABLE(of, gpi_of_match);
  3733. static struct platform_driver gpi_driver = {
  3734. .probe = gpi_probe,
  3735. .remove = gpi_remove,
  3736. .driver = {
  3737. .name = GPI_DMA_DRV_NAME,
  3738. .of_match_table = gpi_of_match,
  3739. },
  3740. };
  3741. static int __init gpi_init(void)
  3742. {
  3743. pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
  3744. return platform_driver_register(&gpi_driver);
  3745. }
  3746. static void __exit gpi_exit(void)
  3747. {
  3748. platform_driver_unregister(&gpi_driver);
  3749. }
  3750. module_init(gpi_init);
  3751. module_exit(gpi_exit);
  3752. MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
  3753. MODULE_LICENSE("GPL");