q2spi-msm-geni.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include "q2spi-msm.h"
#include "q2spi-slave-reg.h"

#define CREATE_TRACE_POINTS
#include "q2spi-trace.h"

static int q2spi_slave_init(struct q2spi_geni *q2spi);
static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt);
static struct q2spi_geni *get_q2spi(struct device *dev);
static int q2spi_geni_runtime_resume(struct device *dev);

/* FTRACE Logging */
void q2spi_trace_log(struct device *dev, const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;
	trace_q2spi_log_info(dev_name(dev), &vaf);
	va_end(args);
}

/**
 * q2spi_kzalloc - allocate kernel memory
 * @q2spi: Pointer to main q2spi_geni structure
 * @size: Size of the memory to allocate
 * @line: line number from where allocation is invoked
 *
 * Return: Pointer to allocated memory on success, NULL on failure.
 */
void *q2spi_kzalloc(struct q2spi_geni *q2spi, int size, int line)
{
	void *ptr = kzalloc(size, GFP_ATOMIC);

	if (ptr) {
		atomic_inc(&q2spi->alloc_count);
		Q2SPI_DEBUG(q2spi, "Allocated 0x%p at %d, count:%d\n",
			    ptr, line, atomic_read(&q2spi->alloc_count));
	}
	return ptr;
}

/**
 * q2spi_kfree - free kernel memory allocated by q2spi_kzalloc()
 * @q2spi: Pointer to main q2spi_geni structure
 * @ptr: address to be freed
 * @line: line number from where free is invoked
 *
 * Return: None
 */
void q2spi_kfree(struct q2spi_geni *q2spi, void *ptr, int line)
{
	if (ptr) {
		atomic_dec(&q2spi->alloc_count);
		kfree(ptr);
	}
	Q2SPI_DEBUG(q2spi, "Freeing 0x%p from %d, count:%d\n",
		    ptr, line, atomic_read(&q2spi->alloc_count));
}

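/*
 * Example usage (illustrative only): callers pass __LINE__ so leaks can be
 * traced back to the allocation site via alloc_count in the debug logs. This
 * mirrors the pattern used by q2spi_alloc_q2spi_pkt() below:
 *
 *	struct q2spi_dma_transfer *xfer;
 *
 *	xfer = q2spi_kzalloc(q2spi, sizeof(*xfer), __LINE__);
 *	if (!xfer)
 *		return -ENOMEM;
 *	...
 *	q2spi_kfree(q2spi, xfer, __LINE__);
 */
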
void __q2spi_dump_ipc(struct q2spi_geni *q2spi, char *prefix,
		      char *str, int total, int offset, int size)
{
	char buf[DATA_BYTES_PER_LINE * 5];
	char data[DATA_BYTES_PER_LINE * 5];
	int len = min(size, DATA_BYTES_PER_LINE);

	hex_dump_to_buffer(str, len, DATA_BYTES_PER_LINE, 1, buf, sizeof(buf), false);
	scnprintf(data, sizeof(data), "%s[%d-%d of %d]: %s", prefix, offset + 1,
		  offset + len, total, buf);
	Q2SPI_DEBUG(q2spi, "%s: %s\n", __func__, data);
}

/**
 * q2spi_dump_ipc - Log dump function for debugging
 * @q2spi: Pointer to main q2spi_geni structure
 * @ipc_ctx: IPC context pointer to dump logs in IPC
 * @prefix: Prefix to use in log
 * @str: String to dump in log
 * @size: Number of data bytes to dump
 *
 * Dumps @str in chunks of Q2SPI_DATA_DUMP_SIZE bytes, capped at
 * max_data_dump_size when that limit is set.
 *
 * Return: None
 */
void q2spi_dump_ipc(struct q2spi_geni *q2spi, void *ipc_ctx, char *prefix,
		    char *str, int size)
{
	int offset = 0, total_bytes = size;

	if (!str) {
		Q2SPI_ERROR(q2spi, "%s: Err str is NULL\n", __func__);
		return;
	}
	if (q2spi->max_data_dump_size > 0 && size > q2spi->max_data_dump_size)
		size = q2spi->max_data_dump_size;
	while (size > Q2SPI_DATA_DUMP_SIZE) {
		__q2spi_dump_ipc(q2spi, prefix, (char *)str + offset, total_bytes,
				 offset, Q2SPI_DATA_DUMP_SIZE);
		offset += Q2SPI_DATA_DUMP_SIZE;
		size -= Q2SPI_DATA_DUMP_SIZE;
	}
	__q2spi_dump_ipc(q2spi, prefix, (char *)str + offset, total_bytes, offset, size);
}

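/*
 * Worked example (illustrative; assumes Q2SPI_DATA_DUMP_SIZE == 16): dumping
 * a 40-byte buffer with prefix "RX" produces three log lines covering
 * "RX[1-16 of 40]: ...", "RX[17-32 of 40]: ..." and "RX[33-40 of 40]: ...".
 */
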
/*
 * max_dump_size_show() - Returns the value stored in the max_dump_size sysfs entry
 *
 * @dev: pointer to device
 * @attr: device attributes
 * @buf: buffer to store the max_dump_size value
 *
 * Return: Number of bytes written to @buf
 */
static ssize_t max_dump_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct q2spi_geni *q2spi = get_q2spi(dev);

	/* sysfs show buffers are PAGE_SIZE; sizeof(int) would truncate the output */
	return scnprintf(buf, PAGE_SIZE, "%d\n", q2spi->max_data_dump_size);
}

/*
 * max_dump_size_store() - store the max_dump_size sysfs value
 *
 * @dev: pointer to device
 * @attr: device attributes
 * @buf: buffer which contains the max_dump_size in string format
 * @size: number of bytes in @buf
 *
 * Return: Size copied in the buffer
 */
static ssize_t max_dump_size_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct q2spi_geni *q2spi = get_q2spi(dev);

	if (kstrtoint(buf, 0, &q2spi->max_data_dump_size)) {
		dev_err(dev, "%s Invalid input\n", __func__);
		return -EINVAL;
	}
	if (q2spi->max_data_dump_size <= 0)
		q2spi->max_data_dump_size = Q2SPI_DATA_DUMP_SIZE;
	return size;
}
static DEVICE_ATTR_RW(max_dump_size);

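/*
 * Example (illustrative; the exact sysfs path depends on the platform):
 *
 *	echo 64 > /sys/devices/platform/.../max_dump_size
 *	cat /sys/devices/platform/.../max_dump_size
 *
 * Values <= 0 fall back to Q2SPI_DATA_DUMP_SIZE.
 */
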
/**
 * q2spi_pkt_state - Returns q2spi packet state in string format
 * @q2spi_pkt: Pointer to q2spi_packet
 *
 * Return: q2spi packet state in string format
 */
const char *q2spi_pkt_state(struct q2spi_packet *q2spi_pkt)
{
	switch (q2spi_pkt->state) {
	case NOT_IN_USE:
		return "NOT_IN_USE";
	case IN_USE:
		return "IN_USE";
	case DATA_AVAIL:
		return "DATA_AVAIL";
	case IN_DELETION:
		return "IN_DELETION";
	case DELETED:
		return "DELETED";
	default:
		return "ERR UNKNOWN STATE";
	}
}

/**
 * q2spi_tx_queue_status - Logs tx_queue list status empty/not-empty
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Return: None
 */
void q2spi_tx_queue_status(struct q2spi_geni *q2spi)
{
	if (list_empty(&q2spi->tx_queue_list))
		Q2SPI_DEBUG(q2spi, "%s tx_queue empty\n", __func__);
	else
		Q2SPI_DEBUG(q2spi, "%s tx_queue not empty!\n", __func__);
}

/**
 * q2spi_free_q2spi_pkt - Deallocates the q2spi_pkt
 * @q2spi_pkt: Pointer to q2spi_pkt to be deleted
 * @line: line number from where free is invoked
 *
 * Return: None
 */
void q2spi_free_q2spi_pkt(struct q2spi_packet *q2spi_pkt, int line)
{
	if (q2spi_pkt->xfer) {
		Q2SPI_DEBUG(q2spi_pkt->q2spi, "%s q2spi_pkt=%p q2spi_pkt->xfer=%p\n",
			    __func__, q2spi_pkt, q2spi_pkt->xfer);
		q2spi_kfree(q2spi_pkt->q2spi, q2spi_pkt->xfer, line);
		q2spi_kfree(q2spi_pkt->q2spi, q2spi_pkt, line);
	}
}

/**
 * q2spi_alloc_q2spi_pkt - Allocates memory for q2spi_pkt
 * @q2spi: Pointer to main q2spi_geni structure
 * @line: line number from where allocation is invoked
 *
 * Return: Upon successful memory allocation returns pointer of q2spi_pkt, else NULL
 */
struct q2spi_packet *q2spi_alloc_q2spi_pkt(struct q2spi_geni *q2spi, int line)
{
	struct q2spi_packet *q2spi_pkt = q2spi_kzalloc(q2spi, sizeof(struct q2spi_packet), line);

	if (!q2spi_pkt) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_pkt alloc fail\n", __func__);
		return NULL;
	}
	q2spi_pkt->xfer = q2spi_kzalloc(q2spi, sizeof(struct q2spi_dma_transfer), line);
	if (!q2spi_pkt->xfer) {
		Q2SPI_ERROR(q2spi, "%s Err xfer alloc failed\n", __func__);
		q2spi_kfree(q2spi, q2spi_pkt, __LINE__);
		return NULL;
	}
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt=%p PID=%d\n", __func__, q2spi_pkt, current->pid);
	init_completion(&q2spi_pkt->bulk_wait);
	init_completion(&q2spi_pkt->wait_for_db);
	q2spi_pkt->q2spi = q2spi;
	return q2spi_pkt;
}

/**
 * q2spi_free_bulk_buf - free bulk buffers from pool
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Free bulk DMA-mapped buffers allocated by q2spi_pre_alloc_buffers().
 *
 * Return: 0 on success, -1 if a buffer is still in use
 */
static int q2spi_free_bulk_buf(struct q2spi_geni *q2spi)
{
	void *buf;
	dma_addr_t dma_addr;
	int i;
	size_t size;

	for (i = 0; i < Q2SPI_MAX_BUF; i++) {
		if (!q2spi->bulk_buf[i])
			continue;
		if (q2spi->bulk_buf_used[i])
			return -1;
		buf = q2spi->bulk_buf[i];
		dma_addr = q2spi->bulk_dma_buf[i];
		size = sizeof(struct q2spi_client_bulk_access_pkt);
		geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size);
	}
	return 0;
}

/**
 * q2spi_free_cr_buf - free cr buffers from pool
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Free CR DMA-mapped buffers allocated by q2spi_pre_alloc_buffers().
 *
 * Return: 0 on success, -1 if a buffer is still in use
 */
static int q2spi_free_cr_buf(struct q2spi_geni *q2spi)
{
	void *buf;
	dma_addr_t dma_addr;
	int i;
	size_t size;

	for (i = 0; i < Q2SPI_MAX_BUF; i++) {
		if (!q2spi->cr_buf[i])
			continue;
		if (q2spi->cr_buf_used[i])
			return -1;
		buf = q2spi->cr_buf[i];
		dma_addr = q2spi->cr_dma_buf[i];
		size = sizeof(struct q2spi_client_dma_pkt);
		geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size);
	}
	return 0;
}

/**
 * q2spi_free_var5_buf - free var5 buffers from pool
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Free var5 DMA-mapped buffers allocated by q2spi_pre_alloc_buffers().
 *
 * Return: 0 on success, -1 if a buffer is still in use
 */
static int q2spi_free_var5_buf(struct q2spi_geni *q2spi)
{
	void *buf;
	dma_addr_t dma_addr;
	int i;
	size_t size;

	for (i = 0; i < Q2SPI_MAX_BUF; i++) {
		if (!q2spi->var5_buf[i])
			continue;
		if (q2spi->var5_buf_used[i])
			return -1;
		buf = q2spi->var5_buf[i];
		dma_addr = q2spi->var5_dma_buf[i];
		size = sizeof(struct q2spi_host_variant4_5_pkt);
		geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size);
	}
	return 0;
}

/**
 * q2spi_free_var1_buf - free var1 buffers from pool
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Free var1 DMA-mapped buffers allocated by q2spi_pre_alloc_buffers().
 *
 * Return: 0 on success, -1 if a buffer is still in use
 */
static int q2spi_free_var1_buf(struct q2spi_geni *q2spi)
{
	void *buf;
	dma_addr_t dma_addr;
	int i;
	size_t size;

	for (i = 0; i < Q2SPI_MAX_BUF; i++) {
		if (!q2spi->var1_buf[i])
			continue;
		if (q2spi->var1_buf_used[i])
			return -1;
		buf = q2spi->var1_buf[i];
		dma_addr = q2spi->var1_dma_buf[i];
		size = sizeof(struct q2spi_host_variant1_pkt);
		geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size);
	}
	return 0;
}

/**
 * q2spi_free_resp_buf - free resp buffers from pool
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Free response DMA-mapped buffers allocated by q2spi_pre_alloc_buffers().
 *
 * Return: 0 on success, -1 if a buffer is still in use
 */
static int q2spi_free_resp_buf(struct q2spi_geni *q2spi)
{
	void *buf;
	dma_addr_t dma_addr;
	int i;
	size_t size;

	for (i = 0; i < Q2SPI_MAX_RESP_BUF; i++) {
		if (!q2spi->resp_buf[i])
			continue;
		if (q2spi->resp_buf_used[i])
			return -1;
		buf = q2spi->resp_buf[i];
		dma_addr = q2spi->resp_dma_buf[i];
		size = Q2SPI_RESP_BUF_SIZE;
		geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size);
	}
	return 0;
}

/**
 * q2spi_free_dma_buf - free preallocated dma mapped buffers
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Free DMA-mapped buffers allocated by q2spi_pre_alloc_buffers().
 *
 * Return: None
 */
static void q2spi_free_dma_buf(struct q2spi_geni *q2spi)
{
	if (q2spi_free_bulk_buf(q2spi))
		Q2SPI_ERROR(q2spi, "%s Err free bulk buf fail\n", __func__);
	if (q2spi_free_cr_buf(q2spi))
		Q2SPI_ERROR(q2spi, "%s Err free cr buf fail\n", __func__);
	if (q2spi_free_var5_buf(q2spi))
		Q2SPI_ERROR(q2spi, "%s Err free var5 buf fail\n", __func__);
	if (q2spi_free_var1_buf(q2spi))
		Q2SPI_ERROR(q2spi, "%s Err free var1 buf fail\n", __func__);
	if (q2spi_free_resp_buf(q2spi))
		Q2SPI_ERROR(q2spi, "%s Err free resp buf fail\n", __func__);
}

/**
 * q2spi_pre_alloc_buffers - Allocate iommu mapped buffers
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * This function allocates Q2SPI_MAX_BUF buffers each for Variant_1 packets,
 * Variant_5 packets, type-3 CRs and bulk-access packets, plus
 * Q2SPI_MAX_RESP_BUF response buffers. All buffers are allocated from and
 * mapped into the QUPV3 core context bank.
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_pre_alloc_buffers(struct q2spi_geni *q2spi)
{
	int i;

	for (i = 0; i < Q2SPI_MAX_BUF; i++) {
		q2spi->var1_buf[i] =
			geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->var1_dma_buf[i],
						       sizeof(struct q2spi_host_variant1_pkt));
		if (IS_ERR_OR_NULL(q2spi->var1_buf[i])) {
			Q2SPI_ERROR(q2spi, "%s Err var1 buf alloc fail\n", __func__);
			goto exit_dealloc;
		}
		q2spi->var5_buf[i] =
			geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->var5_dma_buf[i],
						       (SMA_BUF_SIZE +
						       sizeof(struct q2spi_host_variant4_5_pkt)));
		if (IS_ERR_OR_NULL(q2spi->var5_buf[i])) {
			Q2SPI_ERROR(q2spi, "%s Err var5 buf alloc fail\n", __func__);
			goto exit_dealloc;
		}
		q2spi->cr_buf[i] =
			geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->cr_dma_buf[i],
						       RX_DMA_CR_BUF_SIZE);
		if (IS_ERR_OR_NULL(q2spi->cr_buf[i])) {
			Q2SPI_ERROR(q2spi, "%s Err cr buf alloc fail\n", __func__);
			goto exit_dealloc;
		}
		q2spi->bulk_buf[i] =
			geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->bulk_dma_buf[i],
						       sizeof(struct q2spi_client_bulk_access_pkt));
		if (IS_ERR_OR_NULL(q2spi->bulk_buf[i])) {
			Q2SPI_ERROR(q2spi, "%s Err bulk buf alloc fail\n", __func__);
			goto exit_dealloc;
		}
		Q2SPI_DEBUG(q2spi, "%s var1_buf[%d] virt:%p phy:%p\n", __func__, i,
			    (void *)q2spi->var1_buf[i], (void *)q2spi->var1_dma_buf[i]);
		Q2SPI_DEBUG(q2spi, "%s var5_buf[%d] virt:%p phy:%p\n", __func__, i,
			    (void *)q2spi->var5_buf[i], (void *)q2spi->var5_dma_buf[i]);
		Q2SPI_DEBUG(q2spi, "%s cr_buf[%d] virt:%p phy:%p\n", __func__, i,
			    (void *)q2spi->cr_buf[i], (void *)q2spi->cr_dma_buf[i]);
		Q2SPI_DEBUG(q2spi, "%s bulk_buf[%d] virt:%p phy:%p\n", __func__, i,
			    (void *)q2spi->bulk_buf[i], (void *)q2spi->bulk_dma_buf[i]);
	}
	for (i = 0; i < Q2SPI_MAX_RESP_BUF; i++) {
		q2spi->resp_buf[i] =
			geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->resp_dma_buf[i],
						       Q2SPI_RESP_BUF_SIZE);
		if (IS_ERR_OR_NULL(q2spi->resp_buf[i])) {
			Q2SPI_ERROR(q2spi, "%s Err resp buf alloc fail\n", __func__);
			goto exit_dealloc;
		}
		Q2SPI_DEBUG(q2spi, "%s resp_buf[%d] virt:%p phy:%p\n", __func__, i,
			    (void *)q2spi->resp_buf[i], (void *)q2spi->resp_dma_buf[i]);
	}
	return 0;
exit_dealloc:
	q2spi_free_dma_buf(q2spi);
	return -ENOMEM;
}

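/*
 * Pool summary (derived from the allocations above): each of the
 * Q2SPI_MAX_BUF slots carries a var1, var5, CR and bulk buffer tracked by the
 * matching *_buf_used[] entry, and Q2SPI_MAX_RESP_BUF response buffers of
 * Q2SPI_RESP_BUF_SIZE bytes back generic RX transfers. A slot is taken by
 * pointing *_buf_used[i] at the buffer and released by clearing it back to
 * NULL, as done in q2spi_unmap_dma_buf_used() below.
 */
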
/**
 * q2spi_unmap_dma_buf_used - Unmap used dma buffers
 * @q2spi: Pointer to main q2spi_geni structure
 * @tx_dma: TX dma pointer
 * @rx_dma: RX dma pointer
 *
 * This function marks the matching pool buffers as free so that they can be
 * reused.
 *
 * Return: None
 */
static void
q2spi_unmap_dma_buf_used(struct q2spi_geni *q2spi, dma_addr_t tx_dma, dma_addr_t rx_dma)
{
	int i = 0;
	bool unmapped = false;

	if (!tx_dma && !rx_dma) {
		Q2SPI_ERROR(q2spi, "%s Err TX/RX dma buffer NULL\n", __func__);
		return;
	}
	Q2SPI_DEBUG(q2spi, "%s PID:%d for tx_dma:%p rx_dma:%p\n", __func__,
		    current->pid, (void *)tx_dma, (void *)rx_dma);
	for (i = 0; i < Q2SPI_MAX_BUF; i++) {
		if (tx_dma == q2spi->var1_dma_buf[i]) {
			if (q2spi->var1_buf_used[i]) {
				Q2SPI_DEBUG(q2spi, "%s UNMAP var1_buf[%d] virt:%p phy:%p\n",
					    __func__, i, (void *)q2spi->var1_buf[i],
					    (void *)q2spi->var1_dma_buf[i]);
				q2spi->var1_buf_used[i] = NULL;
				unmapped = true;
			}
		} else if (tx_dma == q2spi->var5_dma_buf[i]) {
			if (q2spi->var5_buf_used[i]) {
				Q2SPI_DEBUG(q2spi, "%s UNMAP var5_buf[%d] virt:%p phy:%p\n",
					    __func__, i, (void *)q2spi->var5_buf[i],
					    (void *)q2spi->var5_dma_buf[i]);
				q2spi->var5_buf_used[i] = NULL;
				unmapped = true;
			}
		}
		if (rx_dma == q2spi->cr_dma_buf[i]) {
			if (q2spi->cr_buf_used[i]) {
				Q2SPI_DEBUG(q2spi, "%s UNMAP cr_buf[%d] virt:%p phy:%p\n",
					    __func__, i, (void *)q2spi->cr_buf[i],
					    (void *)q2spi->cr_dma_buf[i]);
				q2spi->cr_buf_used[i] = NULL;
				unmapped = true;
			}
		} else if (rx_dma == q2spi->bulk_dma_buf[i]) {
			if (q2spi->bulk_buf_used[i]) {
				Q2SPI_DEBUG(q2spi, "%s UNMAP bulk_buf[%d] virt:%p phy:%p\n",
					    __func__, i, (void *)q2spi->bulk_buf[i],
					    (void *)q2spi->bulk_dma_buf[i]);
				q2spi->bulk_buf_used[i] = NULL;
				unmapped = true;
			}
		}
	}
	if (!unmapped)
		Q2SPI_ERROR(q2spi, "%s PID:%d Err unmap fail for tx_dma:%p rx_dma:%p\n",
			    __func__, current->pid, (void *)tx_dma, (void *)rx_dma);
	Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
}

/**
 * q2spi_unmap_var_bufs - check the q2spi variant type and unmap the buffers
 * @q2spi: pointer to q2spi_geni
 * @q2spi_pkt: pointer to q2spi_packet
 *
 * Return: None
 */
void q2spi_unmap_var_bufs(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt)
{
	if (q2spi_pkt->vtype == VARIANT_1_LRA || q2spi_pkt->vtype == VARIANT_1_HRF) {
		Q2SPI_DEBUG(q2spi, "%s Unmapping Var1 buffers..\n", __func__);
		q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var1_tx_dma,
					 q2spi_pkt->var1_rx_dma);
	} else if (q2spi_pkt->vtype == VARIANT_5) {
		Q2SPI_DEBUG(q2spi, "%s Unmapping Var5 buffers..\n", __func__);
		q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var5_tx_dma,
					 q2spi_pkt->var5_rx_dma);
	} else if (q2spi_pkt->vtype == VARIANT_5_HRF) {
		Q2SPI_DEBUG(q2spi, "%s Unmapping Var1 and Var5 buffers..\n", __func__);
		q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var1_tx_dma,
					 (dma_addr_t)NULL);
		q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var5_tx_dma,
					 q2spi_pkt->var5_rx_dma);
	}
}

/**
 * q2spi_get_doorbell_rx_buf - allocate RX DMA buffer for the doorbell
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * This function picks one RX buffer from the pool allocated by
 * q2spi_pre_alloc_buffers() and prepares the RX DMA descriptor to be mapped
 * to GSI. This RX buffer is used to receive the doorbell from GSI.
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_get_doorbell_rx_buf(struct q2spi_geni *q2spi)
{
	struct q2spi_dma_transfer *xfer = q2spi->db_xfer;
	int i;

	/* Pick rx buffers from pre allocated pool */
	for (i = 0; i < Q2SPI_MAX_BUF; i++) {
		if (!q2spi->cr_buf_used[i]) {
			Q2SPI_DEBUG(q2spi, "%s q2spi_db_xfer:%p\n", __func__, q2spi->db_xfer);
			xfer->rx_buf = q2spi->cr_buf[i];
			xfer->rx_dma = q2spi->cr_dma_buf[i];
			q2spi->cr_buf_used[i] = q2spi->cr_buf[i];
			q2spi->rx_buf = xfer->rx_buf;
			Q2SPI_DEBUG(q2spi, "ALLOC %s db rx_buf:%p rx_dma:%p\n",
				    __func__, xfer->rx_buf, (void *)xfer->rx_dma);
			/* Fill with a 0xdb pattern so stale doorbell data is recognizable */
			memset(xfer->rx_buf, 0xdb, RX_DMA_CR_BUF_SIZE);
			return 0;
		}
	}
	Q2SPI_ERROR(q2spi, "%s Err DB RX dma alloc failed\n", __func__);
	return -ENOMEM;
}

/**
 * q2spi_unmap_rx_buf - release RX DMA buffers
 * @q2spi_pkt: Pointer to q2spi packet
 *
 * This function releases rx buffers back to the preallocated pool.
 *
 * Return: None
 */
static void q2spi_unmap_rx_buf(struct q2spi_packet *q2spi_pkt)
{
	struct q2spi_dma_transfer *xfer = q2spi_pkt->xfer;
	struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
	int i = 0;
	bool unmapped = false;

	if (!xfer->rx_buf || !xfer->rx_dma) {
		Q2SPI_ERROR(q2spi, "%s Err RX buffer NULL\n", __func__);
		return;
	}
	Q2SPI_DEBUG(q2spi, "%s PID:%d rx_buf %p %p\n", __func__,
		    current->pid, (void *)xfer->rx_buf, (void *)xfer->rx_dma);
	for (i = 0; i < Q2SPI_MAX_RESP_BUF; i++) {
		if (xfer->rx_dma == q2spi->resp_dma_buf[i]) {
			if (q2spi->resp_buf_used[i]) {
				Q2SPI_DEBUG(q2spi, "%s UNMAP rx_buf[%d] virt:%p phy:%p\n",
					    __func__, i, (void *)q2spi->resp_buf[i],
					    (void *)q2spi->resp_dma_buf[i]);
				q2spi->resp_buf_used[i] = NULL;
				unmapped = true;
			}
		}
	}
	if (!unmapped)
		Q2SPI_ERROR(q2spi, "%s PID:%d Err unmap fail for rx_dma:%p\n",
			    __func__, current->pid, (void *)xfer->rx_dma);
	Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
}

/**
 * q2spi_get_rx_buf - obtain RX DMA buffer from preallocated pool
 * @q2spi_pkt: Pointer to q2spi packet
 * @len: size of the memory to be allocated
 *
 * This function picks a free RX buffer from the pool preallocated by
 * q2spi_pre_alloc_buffers(). The RX buffer is used to receive rx data from
 * the slave.
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_get_rx_buf(struct q2spi_packet *q2spi_pkt, int len)
{
	struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
	struct q2spi_dma_transfer *xfer = q2spi_pkt->xfer;
	int i;

	Q2SPI_DEBUG(q2spi, "%s len:%d\n", __func__, len);
	if (!len) {
		Q2SPI_ERROR(q2spi, "%s Err Zero length for alloc\n", __func__);
		return -EINVAL;
	}
	for (i = 0; i < Q2SPI_MAX_RESP_BUF; i++) {
		if (!q2spi->resp_buf_used[i]) {
			q2spi->resp_buf_used[i] = q2spi->resp_buf[i];
			xfer->rx_buf = q2spi->resp_buf[i];
			xfer->rx_dma = q2spi->resp_dma_buf[i];
			memset(xfer->rx_buf, 0xba, Q2SPI_RESP_BUF_SIZE);
			Q2SPI_DEBUG(q2spi, "%s ALLOC rx buf %p dma_buf:%p\n",
				    __func__, (void *)q2spi->resp_buf[i],
				    (void *)q2spi->resp_dma_buf[i]);
			return 0;
		}
	}
	Q2SPI_ERROR(q2spi, "%s: Err short of RX dma buffers\n", __func__);
	return -ENOMEM;
}

static int q2spi_hrf_entry_format_sleep(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
					struct q2spi_request **q2spi_hrf_req_ptr)
{
	struct q2spi_request *q2spi_hrf_req = NULL;
	/* zero-init so fields not set below don't carry stack garbage onto the wire */
	struct q2spi_mc_hrf_entry hrf_entry = {};

	q2spi_hrf_req = q2spi_kzalloc(q2spi, sizeof(struct q2spi_request), __LINE__);
	if (!q2spi_hrf_req) {
		Q2SPI_ERROR(q2spi, "%s Err alloc hrf req failed\n", __func__);
		return -ENOMEM;
	}
	q2spi_hrf_req->data_buff =
		q2spi_kzalloc(q2spi, sizeof(struct q2spi_mc_hrf_entry), __LINE__);
	if (!q2spi_hrf_req->data_buff) {
		Q2SPI_ERROR(q2spi, "%s Err alloc hrf data_buff failed\n", __func__);
		q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__);
		return -ENOMEM;
	}
	*q2spi_hrf_req_ptr = q2spi_hrf_req;
	hrf_entry.cmd = Q2SPI_SLEEP_OPCODE;
	hrf_entry.parity = 0;
	hrf_entry.arg1 = Q2SPI_CLIENT_SLEEP_BYTE;
	hrf_entry.arg2 = 0;
	hrf_entry.arg3 = 0;
	q2spi_hrf_req->addr = Q2SPI_HRF_PUSH_ADDRESS;
	q2spi_hrf_req->data_len = HRF_ENTRY_DATA_LEN;
	q2spi_hrf_req->sync = 1;
	q2spi_hrf_req->priority = 1;
	q2spi_hrf_req->cmd = LOCAL_REG_WRITE;
	memcpy(q2spi_hrf_req->data_buff, &hrf_entry, sizeof(struct q2spi_mc_hrf_entry));
	Q2SPI_DEBUG(q2spi, "%s End q2spi_hrf_req:%p\n", __func__, q2spi_hrf_req);
	return 0;
}

/**
 * q2spi_hrf_entry_format - prepare HRF entry for HRF flow
 * @q2spi: Pointer to main q2spi_geni structure
 * @q2spi_req: structure for q2spi_request
 * @q2spi_hrf_req_ptr: location to store the pointer to the allocated hrf q2spi_request
 *
 * This function prepares an HRF entry in the format defined by the spec.
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_hrf_entry_format(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
				  struct q2spi_request **q2spi_hrf_req_ptr)
{
	struct q2spi_request *q2spi_hrf_req = NULL;
	/* zero-init so fields not set below don't carry stack garbage onto the wire */
	struct q2spi_mc_hrf_entry hrf_entry = {};
	int flow_id;

	q2spi_hrf_req = q2spi_kzalloc(q2spi, sizeof(struct q2spi_request), __LINE__);
	if (!q2spi_hrf_req) {
		Q2SPI_ERROR(q2spi, "%s Err alloc hrf req failed\n", __func__);
		return -ENOMEM;
	}
	q2spi_hrf_req->data_buff =
		q2spi_kzalloc(q2spi, sizeof(struct q2spi_mc_hrf_entry), __LINE__);
	if (!q2spi_hrf_req->data_buff) {
		Q2SPI_ERROR(q2spi, "%s Err alloc hrf data_buff failed\n", __func__);
		q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__); /* don't leak the request on error */
		return -ENOMEM;
	}
	*q2spi_hrf_req_ptr = q2spi_hrf_req;
	if (q2spi_req.cmd == HRF_WRITE) {
		hrf_entry.cmd = 3;
		hrf_entry.parity = 1;
	} else if (q2spi_req.cmd == HRF_READ) {
		hrf_entry.cmd = 4;
		hrf_entry.parity = 0;
	}
	hrf_entry.flow = HRF_ENTRY_FLOW;
	hrf_entry.type = HRF_ENTRY_TYPE;
	flow_id = q2spi_alloc_xfer_tid(q2spi);
	if (flow_id < 0) {
		Q2SPI_ERROR(q2spi, "%s Err failed to alloc flow_id", __func__);
		q2spi_kfree(q2spi, q2spi_hrf_req->data_buff, __LINE__);
		q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__);
		*q2spi_hrf_req_ptr = NULL;
		return -EINVAL;
	}
	hrf_entry.flow_id = flow_id;
	Q2SPI_DEBUG(q2spi, "%s flow_id:%d len:%d", __func__, hrf_entry.flow_id, q2spi_req.data_len);
	if (q2spi_req.data_len % 4) {
		hrf_entry.dwlen_part1 = (q2spi_req.data_len / 4) & 0xF;
		hrf_entry.dwlen_part2 = ((q2spi_req.data_len / 4) >> 4) & 0xFF;
		hrf_entry.dwlen_part3 = ((q2spi_req.data_len / 4) >> 12) & 0xFF;
	} else {
		hrf_entry.dwlen_part1 = (q2spi_req.data_len / 4 - 1) & 0xF;
		hrf_entry.dwlen_part2 = ((q2spi_req.data_len / 4 - 1) >> 4) & 0xFF;
		hrf_entry.dwlen_part3 = ((q2spi_req.data_len / 4 - 1) >> 12) & 0xFF;
	}
	Q2SPI_DEBUG(q2spi, "%s hrf_entry dwlen part1:%d part2:%d part3:%d\n",
		    __func__, hrf_entry.dwlen_part1, hrf_entry.dwlen_part2, hrf_entry.dwlen_part3);
	hrf_entry.arg2 = q2spi_req.end_point;
	hrf_entry.arg3 = q2spi_req.proto_ind;
	q2spi_hrf_req->addr = q2spi_req.addr;
	q2spi_hrf_req->data_len = HRF_ENTRY_DATA_LEN;
	q2spi_hrf_req->cmd = HRF_WRITE;
	q2spi_hrf_req->flow_id = hrf_entry.flow_id;
	q2spi_hrf_req->end_point = q2spi_req.end_point;
	q2spi_hrf_req->proto_ind = q2spi_req.proto_ind;
	memcpy(q2spi_hrf_req->data_buff, &hrf_entry, sizeof(struct q2spi_mc_hrf_entry));
	return 0;
}

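/*
 * Worked example (illustrative): the DW length is split across three fields,
 * dwlen_part1 (4 bits), dwlen_part2 and dwlen_part3 (8 bits each). For a
 * 4-byte-aligned data_len of 64 bytes, dwlen = 64 / 4 - 1 = 15, so
 * dwlen_part1 = 0xF and dwlen_part2 = dwlen_part3 = 0. For an unaligned
 * data_len of 30 bytes, dwlen = 30 / 4 = 7, so dwlen_part1 = 0x7.
 */
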
/**
 * q2spi_wait_for_doorbell_setup_ready - wait until doorbell buffers are queued to HW
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Return: None
 */
void q2spi_wait_for_doorbell_setup_ready(struct q2spi_geni *q2spi)
{
	long timeout = 0;

	if (!q2spi->doorbell_setup) {
		Q2SPI_DEBUG(q2spi, "%s: Waiting for Doorbell buffers to be setup\n", __func__);
		reinit_completion(&q2spi->db_setup_wait);
		timeout = wait_for_completion_interruptible_timeout(&q2spi->db_setup_wait,
								    msecs_to_jiffies(50));
		if (timeout <= 0) {
			Q2SPI_DEBUG(q2spi, "%s Err timeout for DB buffers setup wait:%ld\n",
				    __func__, timeout);
			if (timeout == -ERESTARTSYS)
				q2spi_sys_restart = true;
		}
	}
}

/**
 * q2spi_unmap_doorbell_rx_buf - unmap rx dma buffer mapped by q2spi_map_doorbell_rx_buf
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Return: None
 */
void q2spi_unmap_doorbell_rx_buf(struct q2spi_geni *q2spi)
{
	if (!q2spi->db_xfer->rx_dma) {
		Q2SPI_DEBUG(q2spi, "%s Doorbell DMA buffer already unmapped\n", __func__);
		return;
	}
	q2spi_unmap_dma_buf_used(q2spi, (dma_addr_t)NULL, q2spi->db_xfer->rx_dma);
	q2spi->db_xfer->rx_dma = (dma_addr_t)NULL;
	q2spi->doorbell_setup = false;
}

/**
 * q2spi_map_doorbell_rx_buf - map rx dma buffer to receive doorbell
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * This function gets one rx buffer using q2spi_get_doorbell_rx_buf() and maps
 * it to GSI so that SW can receive the doorbell.
 *
 * Return: 0 for success, negative number for error condition.
 */
int q2spi_map_doorbell_rx_buf(struct q2spi_geni *q2spi)
{
	struct q2spi_packet *q2spi_pkt;
	int ret = 0;

	Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid);
	if (q2spi_sys_restart)
		return -ERESTARTSYS;
	if (q2spi->port_release || atomic_read(&q2spi->is_suspend)) {
		Q2SPI_DEBUG(q2spi, "%s Port being closed or suspend return\n", __func__);
		return 0;
	}
	if (q2spi->db_xfer->rx_dma) {
		Q2SPI_DEBUG(q2spi, "%s Doorbell buffer already mapped\n", __func__);
		return 0;
	}
	memset(q2spi->db_q2spi_pkt, 0x00, sizeof(struct q2spi_packet));
	q2spi_pkt = q2spi->db_q2spi_pkt;
	q2spi_pkt->q2spi = q2spi;
	q2spi_pkt->m_cmd_param = Q2SPI_RX_ONLY;
	memset(q2spi->db_xfer, 0, sizeof(struct q2spi_dma_transfer));
	/* Allocate an RX DMA buffer and map it to GSI so that SW can receive the doorbell */
	ret = q2spi_get_doorbell_rx_buf(q2spi);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err failed to alloc RX DMA buf", __func__);
		return ret;
	}
	/* Map RX DMA descriptor on RX channel */
	q2spi->db_xfer->cmd = Q2SPI_RX_ONLY;
	q2spi->db_xfer->rx_data_len = RX_DMA_CR_BUF_SIZE; /* 96 bytes for 4 CRs in doorbell */
	q2spi->db_xfer->rx_len = RX_DMA_CR_BUF_SIZE;
	q2spi->db_xfer->q2spi_pkt = q2spi_pkt;
	Q2SPI_DEBUG(q2spi, "%s PID=%d wait for gsi_lock\n", __func__, current->pid);
	mutex_lock(&q2spi->gsi_lock);
	Q2SPI_DEBUG(q2spi, "%s PID=%d acquired gsi_lock\n", __func__, current->pid);
	ret = q2spi_setup_gsi_xfer(q2spi_pkt);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_setup_gsi_xfer failed: %d\n", __func__, ret);
		mutex_unlock(&q2spi->gsi_lock);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s PID=%d release gsi_lock\n", __func__, current->pid);
	mutex_unlock(&q2spi->gsi_lock);
	q2spi->doorbell_setup = true;
	Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
	complete_all(&q2spi->db_setup_wait);
	return ret;
}

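/*
 * Doorbell setup sequence (summary of the above): pick a CR buffer from the
 * pool, describe it in db_xfer as an RX-only transfer of RX_DMA_CR_BUF_SIZE
 * bytes, queue it to GSI under gsi_lock, then signal waiters through
 * db_setup_wait. q2spi_wait_for_doorbell_setup_ready() blocks on that
 * completion for up to 50 ms.
 */
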
/**
 * q2spi_alloc_host_variant - allocate memory for host variant
 * @q2spi: Pointer to main q2spi_geni structure
 * @len: size of the memory to be allocated
 *
 * This function allocates dma_alloc_coherent memory of the length specified.
 *
 * Return: address of the buffer on success, NULL or ERR_PTR on
 * failure/error.
 */
void *q2spi_alloc_host_variant(struct q2spi_geni *q2spi, int len)
{
	return geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->dma_buf, len);
}

/**
 * q2spi_doorbell - q2spi doorbell to handle CR events from q2spi slave
 * @q2spi: Pointer to main q2spi_geni structure
 * @q2spi_cr_hdr_event: Pointer to q2spi_cr_hdr_event
 *
 * If the doorbell interrupt to the Host is enabled, the Host gets a doorbell
 * interrupt upon any error or new CR event from the q2spi slave. This
 * function copies the CR header event delivered with the doorbell and queues
 * the doorbell work, which parses it, prepares a CR packet, adds it to the CR
 * queue list and maps a new RX dma buffer to receive the next doorbell.
 *
 * Return: None
 */
void q2spi_doorbell(struct q2spi_geni *q2spi,
		    const struct qup_q2spi_cr_header_event *q2spi_cr_hdr_event)
{
	Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid);
	if (q2spi_sys_restart)
		return;
	memcpy(&q2spi->q2spi_cr_hdr_event, q2spi_cr_hdr_event,
	       sizeof(struct qup_q2spi_cr_header_event));
	queue_work(q2spi->doorbell_wq, &q2spi->q2spi_doorbell_work);
	Q2SPI_DEBUG(q2spi, "%s End work queued PID=%d\n", __func__, current->pid);
}

/**
 * q2spi_prepare_cr_pkt - Allocates and populates CR packet as part of doorbell handling
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Return: pointer to allocated q2spi cr packet, NULL on failure
 */
struct q2spi_cr_packet *q2spi_prepare_cr_pkt(struct q2spi_geni *q2spi)
{
	struct q2spi_cr_packet *q2spi_cr_pkt = NULL;
	const struct qup_q2spi_cr_header_event *q2spi_cr_hdr_event = NULL;
	unsigned long flags;
	int i = 0;
	u8 *ptr;

	q2spi_cr_hdr_event = &q2spi->q2spi_cr_hdr_event;
	if (q2spi_cr_hdr_event->byte0_len > 4) {
		Q2SPI_ERROR(q2spi, "%s Err num of valid crs:%d\n", __func__,
			    q2spi_cr_hdr_event->byte0_len);
		return NULL;
	}
	q2spi_cr_pkt = q2spi_kzalloc(q2spi, sizeof(struct q2spi_cr_packet), __LINE__);
	if (!q2spi_cr_pkt) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_cr_pkt alloc failed\n", __func__);
		return NULL;
	}
	spin_lock_irqsave(&q2spi->cr_queue_lock, flags);
	q2spi_cr_pkt->num_valid_crs = q2spi_cr_hdr_event->byte0_len;
	Q2SPI_DEBUG(q2spi, "%s q2spi_cr_pkt:%p hdr_0:0x%x no_of_crs=%d\n", __func__,
		    q2spi_cr_pkt, q2spi_cr_hdr_event->cr_hdr[0], q2spi_cr_pkt->num_valid_crs);
	if (q2spi_cr_hdr_event->byte0_err)
		Q2SPI_DEBUG(q2spi, "%s Error: q2spi_cr_hdr_event->byte0_err=%d\n",
			    __func__, q2spi_cr_hdr_event->byte0_err);
	for (i = 0; i < q2spi_cr_pkt->num_valid_crs; i++) {
		Q2SPI_DEBUG(q2spi, "%s hdr_[%d]:0x%x\n",
			    __func__, i, q2spi_cr_hdr_event->cr_hdr[i]);
		/* Decode the CR header byte: cmd[3:0], flow[4], type[6:5], parity[7] */
		q2spi_cr_pkt->cr_hdr[i].cmd = (q2spi_cr_hdr_event->cr_hdr[i]) & 0xF;
		q2spi_cr_pkt->cr_hdr[i].flow = (q2spi_cr_hdr_event->cr_hdr[i] >> 4) & 0x1;
		q2spi_cr_pkt->cr_hdr[i].type = (q2spi_cr_hdr_event->cr_hdr[i] >> 5) & 0x3;
		q2spi_cr_pkt->cr_hdr[i].parity = (q2spi_cr_hdr_event->cr_hdr[i] >> 7) & 0x1;
		Q2SPI_DEBUG(q2spi, "%s CR HDR[%d] cmd/opcode:%d C_flow:%d type:%d parity:%d\n",
			    __func__, i, q2spi_cr_pkt->cr_hdr[i].cmd,
			    q2spi_cr_pkt->cr_hdr[i].flow, q2spi_cr_pkt->cr_hdr[i].type,
			    q2spi_cr_pkt->cr_hdr[i].parity);
		if ((q2spi_cr_hdr_event->cr_hdr[i] & 0xF) == CR_EXTENSION) {
			/* Extension CR header: cmd[3:0], dw_len[5:4], parity[7] */
			q2spi_cr_pkt->ext_cr_hdr.cmd = (q2spi_cr_hdr_event->cr_hdr[i]) & 0xF;
			q2spi_cr_pkt->ext_cr_hdr.dw_len =
				(q2spi_cr_hdr_event->cr_hdr[i] >> 4) & 0x3;
			q2spi_cr_pkt->ext_cr_hdr.parity =
				(q2spi_cr_hdr_event->cr_hdr[i] >> 7) & 0x1;
			Q2SPI_DEBUG(q2spi, "%s CR EXT HDR[%d] cmd/opcode:%d dw_len:%d parity:%d\n",
				    __func__, i, q2spi_cr_pkt->ext_cr_hdr.cmd,
				    q2spi_cr_pkt->ext_cr_hdr.dw_len,
				    q2spi_cr_pkt->ext_cr_hdr.parity);
		}
	}
	ptr = (u8 *)q2spi->db_xfer->rx_buf;
	for (i = 0; i < q2spi_cr_pkt->num_valid_crs; i++) {
		if (q2spi_cr_pkt->cr_hdr[i].cmd == BULK_ACCESS_STATUS) {
			q2spi_cr_pkt->bulk_pkt[i].cmd = q2spi_cr_pkt->cr_hdr[i].cmd;
			q2spi_cr_pkt->bulk_pkt[i].flow = q2spi_cr_pkt->cr_hdr[i].flow;
			q2spi_cr_pkt->bulk_pkt[i].parity = q2spi_cr_pkt->cr_hdr[i].parity;
			q2spi_dump_ipc(q2spi, q2spi->ipc, "DB BULK DMA RX",
				       (char *)ptr, q2spi->db_xfer->rx_len);
			q2spi_cr_pkt->bulk_pkt[i].status = ptr[0] & 0xF;
			q2spi_cr_pkt->bulk_pkt[i].flow_id = ptr[0] >> 4;
			ptr += CR_BULK_DATA_SIZE;
			q2spi_cr_pkt->cr_hdr_type[i] = CR_HDR_BULK;
			Q2SPI_DEBUG(q2spi, "%s i:%d cr_hdr_type:0x%x flow_id:%d\n",
				    __func__, i, q2spi_cr_pkt->cr_hdr_type[i],
				    q2spi_cr_pkt->bulk_pkt[i].flow_id);
		} else if ((q2spi_cr_pkt->cr_hdr[i].cmd == ADDR_LESS_WR_ACCESS) ||
			   (q2spi_cr_pkt->cr_hdr[i].cmd == ADDR_LESS_RD_ACCESS)) {
			memcpy((void *)&q2spi_cr_pkt->var3_pkt[i], (void *)ptr,
			       sizeof(struct q2spi_client_dma_pkt));
			q2spi_dump_ipc(q2spi, q2spi->ipc, "DB VAR3 DMA RX",
				       (char *)ptr, q2spi->db_xfer->rx_len);
			ptr += CR_DMA_DATA_SIZE;
			q2spi_cr_pkt->cr_hdr_type[i] = CR_HDR_VAR3;
			Q2SPI_DEBUG(q2spi, "%s i:%d cr_hdr_type:0x%x\n",
				    __func__, i, q2spi_cr_pkt->cr_hdr_type[i]);
			Q2SPI_DEBUG(q2spi, "%s var3_pkt:%p var3_flow_id:%d\n",
				    __func__, &q2spi_cr_pkt->var3_pkt[i],
				    q2spi_cr_pkt->var3_pkt[i].flow_id);
			Q2SPI_DEBUG(q2spi, "%s len_part1:%d len_part2:%d\n", __func__,
				    q2spi_cr_pkt->var3_pkt[i].dw_len_part1,
				    q2spi_cr_pkt->var3_pkt[i].dw_len_part2);
		} else if (q2spi_cr_pkt->cr_hdr[i].cmd == CR_EXTENSION) {
			complete_all(&q2spi->wait_for_ext_cr);
			q2spi_cr_pkt->extension_pkt.cmd = q2spi_cr_pkt->ext_cr_hdr.cmd;
			q2spi_cr_pkt->extension_pkt.dw_len = q2spi_cr_pkt->ext_cr_hdr.dw_len;
			q2spi_cr_pkt->extension_pkt.parity = q2spi_cr_pkt->ext_cr_hdr.parity;
			ptr += q2spi_cr_pkt->extension_pkt.dw_len * 4 + CR_EXTENSION_DATA_BYTES;
			Q2SPI_DEBUG(q2spi, "%s Extension CR cmd:%d dwlen:%d parity:%d\n", __func__,
				    q2spi_cr_pkt->extension_pkt.cmd,
				    q2spi_cr_pkt->extension_pkt.dw_len,
				    q2spi_cr_pkt->extension_pkt.parity);
			q2spi_cr_pkt->cr_hdr_type[i] = CR_HDR_VAR3;
		}
	}
	spin_unlock_irqrestore(&q2spi->cr_queue_lock, flags);
	return q2spi_cr_pkt;
}

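/*
 * CR header byte layout, as decoded above:
 *
 *	bits [3:0]  cmd/opcode
 *	bit  [4]    flow
 *	bits [6:5]  type
 *	bit  [7]    parity
 *
 * An extension CR (cmd == CR_EXTENSION) reuses bits [5:4] as a 2-bit dw_len.
 * The CR payload bytes following the headers in db_xfer->rx_buf are consumed
 * in order: CR_BULK_DATA_SIZE bytes per bulk-status CR and CR_DMA_DATA_SIZE
 * bytes per address-less (var3) CR.
 */
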
static int q2spi_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev;
	struct q2spi_chrdev *q2spi_cdev;
	struct q2spi_geni *q2spi;
	int ret = 0, rc = 0;

	if (q2spi_sys_restart)
		return -ERESTARTSYS;
	rc = iminor(inode);
	cdev = inode->i_cdev;
	q2spi_cdev = container_of(cdev, struct q2spi_chrdev, cdev[rc]);
	if (!q2spi_cdev) {
		pr_err("%s Err q2spi_cdev NULL\n", __func__);
		return -EINVAL;
	}
	q2spi = container_of(q2spi_cdev, struct q2spi_geni, chrdev);
	if (!q2spi) {
		pr_err("%s Err q2spi NULL\n", __func__);
		return -EINVAL;
	}
	if (!q2spi->port_release) {
		Q2SPI_DEBUG(q2spi, "%s Err port already opened PID:%d\n", __func__, current->pid);
		return -EBUSY;
	}
	Q2SPI_DEBUG(q2spi, "%s PID:%d, allocs=%d\n",
		    __func__, current->pid, atomic_read(&q2spi->alloc_count));
	if (q2spi->hw_state_is_bad) {
		Q2SPI_DEBUG(q2spi, "%s Err Retries failed, check HW state\n", __func__);
		return -EPIPE;
	}
	if (q2spi_geni_resources_on(q2spi))
		return -EIO;
	/* Q2SPI slave HPG 2.1 Initialization */
	ret = q2spi_slave_init(q2spi);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err Failed to init q2spi slave %d\n",
			    __func__, ret);
		return ret;
	}
	q2spi->port_release = false;
	if (!q2spi->doorbell_setup) {
		ret = q2spi_map_doorbell_rx_buf(q2spi);
		if (ret) {
			Q2SPI_ERROR(q2spi, "%s Err failed to alloc RX DMA buf\n", __func__);
			q2spi->port_release = true;
			return ret;
		}
	}
	filp->private_data = q2spi;
	Q2SPI_DEBUG(q2spi, "%s End PID:%d, allocs:%d\n",
		    __func__, current->pid, atomic_read(&q2spi->alloc_count));
	return 0;
}

/**
 * q2spi_get_variant_buf - Get one buffer from the pre-allocated buffer pool
 * @q2spi: Pointer to main q2spi_geni structure
 * @q2spi_pkt: pointer to q2spi packet
 * @vtype: variant type in q2spi_pkt
 *
 * This function picks one free buffer, allocated earlier via
 * q2spi_pre_alloc_buffers(), based on the variant type specified in the
 * q2spi packet.
 *
 * Return: pointer to the buffer on success, NULL when the variant type is
 * invalid or no free buffer is available.
 */
static inline void *q2spi_get_variant_buf(struct q2spi_geni *q2spi,
					  struct q2spi_packet *q2spi_pkt, enum var_type vtype)
{
	int i;

	if (vtype != VARIANT_1_LRA && vtype != VARIANT_5) {
		Q2SPI_ERROR(q2spi, "%s Err Invalid variant:%d!\n", __func__, vtype);
		return NULL;
	}

	/* Pick buffers from pre allocated pool */
	if (vtype == VARIANT_1_LRA) {
		for (i = 0; i < Q2SPI_MAX_BUF; i++) {
			if (!q2spi->var1_buf_used[i])
				break;
		}
		if (i < Q2SPI_MAX_BUF) {
			q2spi->var1_buf_used[i] = q2spi->var1_buf[i];
			q2spi_pkt->var1_tx_dma = q2spi->var1_dma_buf[i];
			Q2SPI_DEBUG(q2spi, "%s ALLOC var1 i:%d vir1_buf:%p phy_dma_buf:%p\n",
				    __func__, i, (void *)q2spi->var1_buf[i],
				    (void *)q2spi->var1_dma_buf[i]);
			return (void *)q2spi->var1_buf[i];
		}
	} else if (vtype == VARIANT_5) {
		for (i = 0; i < Q2SPI_MAX_BUF; i++) {
			if (!q2spi->var5_buf_used[i])
				break;
		}
		if (i < Q2SPI_MAX_BUF) {
			q2spi->var5_buf_used[i] = q2spi->var5_buf[i];
			q2spi_pkt->var5_tx_dma = q2spi->var5_dma_buf[i];
			Q2SPI_DEBUG(q2spi, "%s ALLOC var5 i:%d vir5_buf:%p phy_dma_buf:%p\n",
				    __func__, i, (void *)q2spi->var5_buf[i],
				    (void *)q2spi->var5_dma_buf[i]);
			return (void *)q2spi->var5_buf[i];
		}
	}
	Q2SPI_ERROR(q2spi, "%s Err Short of buffers for variant:%d!\n", __func__, vtype);
	return NULL;
}

/**
 * q2spi_alloc_xfer_tid() - Allocate a tid to a q2spi transfer request
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Return: allocated tid on success, negative error code on failure.
 */
int q2spi_alloc_xfer_tid(struct q2spi_geni *q2spi)
{
	unsigned long flags;
	int tid = 0;

	spin_lock_irqsave(&q2spi->txn_lock, flags);
	tid = idr_alloc_cyclic(&q2spi->tid_idr, q2spi, Q2SPI_START_TID_ID,
			       Q2SPI_END_TID_ID, GFP_ATOMIC);
	if (tid < Q2SPI_START_TID_ID || tid > Q2SPI_END_TID_ID) {
		Q2SPI_ERROR(q2spi, "%s Err Invalid tid:%d\n", __func__, tid);
		spin_unlock_irqrestore(&q2spi->txn_lock, flags);
		return -EINVAL;
	}
	Q2SPI_DEBUG(q2spi, "%s tid:%d ret:%d\n", __func__, tid, tid);
	spin_unlock_irqrestore(&q2spi->txn_lock, flags);
	return tid;
}

/**
 * q2spi_free_xfer_tid() - Free the tid of a q2spi transfer
 * @q2spi: Pointer to main q2spi_geni structure
 * @tid: tid to be released
 */
void q2spi_free_xfer_tid(struct q2spi_geni *q2spi, int tid)
{
	unsigned long flags;

	spin_lock_irqsave(&q2spi->txn_lock, flags);
	Q2SPI_DEBUG(q2spi, "%s tid:%d\n", __func__, tid);
	if (tid < Q2SPI_START_TID_ID || tid > Q2SPI_END_TID_ID) {
		Q2SPI_ERROR(q2spi, "%s Err Invalid tid:%d\n", __func__, tid);
		spin_unlock_irqrestore(&q2spi->txn_lock, flags);
		/* Invalid tid: return here, do not fall through to a second unlock */
		return;
	}
	idr_remove(&q2spi->tid_idr, tid);
	spin_unlock_irqrestore(&q2spi->txn_lock, flags);
}

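/*
 * q2spi_get_dw_offset() below converts the byte-addressed register offset
 * from the request into the DWORD-granularity offset carried in the
 * variant-1 header. Assuming Q2SPI_OFFSET_MASK is the DWORD stride (4),
 * a register address of 0x10 would map to DWORD offset 4.
 */
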
static unsigned int
q2spi_get_dw_offset(struct q2spi_geni *q2spi, enum cmd_type c_type, unsigned int reg_offset)
{
	unsigned int offset = 0, remainder = 0, quotient = 0;

	offset = reg_offset / Q2SPI_OFFSET_MASK;
	Q2SPI_DEBUG(q2spi, "%s type:%d offset:%d remainder:%d quotient:%d\n",
		    __func__, c_type, offset, remainder, quotient);
	return offset;
}

int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
		    struct q2spi_packet **q2spi_pkt_ptr, int vtype)
{
	struct q2spi_packet *q2spi_pkt;
	struct q2spi_host_variant1_pkt *q2spi_hc_var1;
	int ret;
	unsigned int dw_offset = 0;

	q2spi_pkt = q2spi_alloc_q2spi_pkt(q2spi, __LINE__);
	if (!q2spi_pkt)
		return -ENOMEM;

	*q2spi_pkt_ptr = q2spi_pkt;
	q2spi_hc_var1 = (struct q2spi_host_variant1_pkt *)
			q2spi_get_variant_buf(q2spi, q2spi_pkt, VARIANT_1_LRA);
	if (!q2spi_hc_var1) {
		Q2SPI_DEBUG(q2spi, "%s Err Invalid q2spi_hc_var1\n", __func__);
		return -ENOMEM;
	}
	Q2SPI_DEBUG(q2spi, "%s var_1:%p var_1_phy:%p cmd:%d\n",
		    __func__, q2spi_hc_var1, (void *)q2spi_pkt->var1_tx_dma, q2spi_req.cmd);
	if (q2spi_req.cmd == LOCAL_REG_READ || q2spi_req.cmd == HRF_READ) {
		q2spi_hc_var1->cmd = HC_DATA_READ;
		q2spi_pkt->m_cmd_param = Q2SPI_TX_RX;
		ret = q2spi_get_rx_buf(q2spi_pkt, q2spi_req.data_len);
		if (ret)
			return ret;
	} else if (q2spi_req.cmd == LOCAL_REG_WRITE || q2spi_req.cmd == HRF_WRITE) {
		q2spi_hc_var1->cmd = HC_DATA_WRITE;
		q2spi_pkt->m_cmd_param = Q2SPI_TX_ONLY;
		q2spi_req.data_len = sizeof(q2spi_hc_var1->data_buf) <= q2spi_req.data_len ?
				     sizeof(q2spi_hc_var1->data_buf) : q2spi_req.data_len;
		memcpy(q2spi_hc_var1->data_buf, q2spi_req.data_buff, q2spi_req.data_len);
		q2spi_kfree(q2spi, q2spi_req.data_buff, __LINE__);
		q2spi_req.data_buff = NULL;
	}
	q2spi_hc_var1->flow = MC_FLOW;
	q2spi_hc_var1->interrupt = CLIENT_INTERRUPT;
	q2spi_hc_var1->seg_last = SEGMENT_LST;
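	/*
	 * dw_len encodes the payload size as (number of DWORDs - 1):
	 * lengths that are not a multiple of 4 round up, so e.g. both a
	 * 6-byte and an 8-byte payload yield dw_len = 1 (two DWORDs).
	 */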
	if (q2spi_req.data_len % 4)
		q2spi_hc_var1->dw_len = (q2spi_req.data_len / 4);
	else
		q2spi_hc_var1->dw_len = (q2spi_req.data_len / 4) - 1;
	q2spi_hc_var1->access_type = LOCAL_REG_ACCESS;
	q2spi_hc_var1->address_mode = CLIENT_ADDRESS;
	Q2SPI_DEBUG(q2spi, "%s data_len:%d dw_len:%d req_flow_id:%d\n",
		    __func__, q2spi_req.data_len, q2spi_hc_var1->dw_len, q2spi_req.flow_id);
	if (!q2spi_req.flow_id && !q2spi->hrf_flow) {
		ret = q2spi_alloc_xfer_tid(q2spi);
		if (ret < 0) {
			Q2SPI_ERROR(q2spi, "%s Err failed to alloc xfer_tid\n", __func__);
			return -EINVAL;
		}
		q2spi_hc_var1->flow_id = ret;
	} else {
		q2spi_hc_var1->flow_id = q2spi_req.flow_id;
	}
	dw_offset = q2spi_get_dw_offset(q2spi, q2spi_req.cmd, q2spi_req.addr);
	q2spi_hc_var1->reg_offset = dw_offset;
	q2spi_pkt->xfer->tid = q2spi_hc_var1->flow_id;
	q2spi_pkt->var1_pkt = q2spi_hc_var1;
	q2spi_pkt->vtype = vtype;
	q2spi_pkt->valid = true;
	q2spi_pkt->sync = q2spi_req.sync;
	Q2SPI_DEBUG(q2spi, "%s *q2spi_pkt_ptr:%p End ret flow_id:%d\n",
		    __func__, *q2spi_pkt_ptr, q2spi_hc_var1->flow_id);
	return q2spi_hc_var1->flow_id;
}

int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
		     struct q2spi_packet *q2spi_pkt)
{
	struct q2spi_host_variant4_5_pkt *q2spi_hc_var5;
	int ret = 0, flow_id;

	if (!q2spi) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi NULL\n", __func__);
		return -EINVAL;
	}
	if (!q2spi_pkt) {
		Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi_pkt\n", __func__);
		return -EINVAL;
	}
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p pkt_var_1:%p pkt_dma:%p pkt_var_5:%p\n",
		    __func__, q2spi_pkt, q2spi_pkt->var1_pkt,
		    (void *)q2spi_pkt->var5_tx_dma,
		    q2spi_pkt->var5_pkt);
	Q2SPI_DEBUG(q2spi, "%s req_cmd:%d req_addr:%d req_len:%d req_data_buf:%p\n",
		    __func__, q2spi_req.cmd, q2spi_req.addr, q2spi_req.data_len,
		    q2spi_req.data_buff);
	q2spi_hc_var5 = (struct q2spi_host_variant4_5_pkt *)
			q2spi_get_variant_buf(q2spi, q2spi_pkt, VARIANT_5);
	if (!q2spi_hc_var5) {
		Q2SPI_ERROR(q2spi, "%s Err var5 buffer is not available\n", __func__);
		return -ENOMEM;
	}
	memset(q2spi_hc_var5->data_buf, 0xba, 4096);
	Q2SPI_DEBUG(q2spi, "%s var_5:%p cmd:%d\n", __func__, q2spi_hc_var5, q2spi_req.cmd);
	Q2SPI_DEBUG(q2spi, "%s pkt_var_1:%p pkt_dma:%p pkt_var_5:%p\n",
		    __func__, q2spi_pkt->var1_pkt,
		    (void *)q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_pkt);
	if (q2spi_req.data_len > Q2SPI_MAX_DATA_LEN) {
		Q2SPI_ERROR(q2spi, "%s Err (q2spi_req.data_len > Q2SPI_MAX_DATA_LEN) %d return\n",
			    __func__, q2spi_req.data_len);
		Q2SPI_DEBUG(q2spi, "%s Unmapping Var5 buffer\n", __func__);
		q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_rx_dma);
		return -ENOMEM;
	}
	if (q2spi_req.cmd == DATA_READ || q2spi_req.cmd == HRF_READ) {
		q2spi_hc_var5->cmd = HC_SMA_READ;
		q2spi_pkt->m_cmd_param = Q2SPI_TX_RX;
		ret = q2spi_get_rx_buf(q2spi_pkt, q2spi_req.data_len);
		if (ret) {
			Q2SPI_DEBUG(q2spi, "%s Unmapping Var5 buffer\n", __func__);
			q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var5_tx_dma,
						 q2spi_pkt->var5_rx_dma);
			return ret;
		}
	} else if (q2spi_req.cmd == DATA_WRITE || q2spi_req.cmd == HRF_WRITE) {
		q2spi_hc_var5->cmd = HC_SMA_WRITE;
		q2spi_pkt->m_cmd_param = Q2SPI_TX_ONLY;
		q2spi_req.data_len = sizeof(q2spi_hc_var5->data_buf) <= q2spi_req.data_len ?
				     sizeof(q2spi_hc_var5->data_buf) : q2spi_req.data_len;
		memcpy(q2spi_hc_var5->data_buf, q2spi_req.data_buff, q2spi_req.data_len);
		q2spi_dump_ipc(q2spi, q2spi->ipc, "sma format q2spi_req data_buf",
			       (char *)q2spi_req.data_buff, q2spi_req.data_len);
		q2spi_dump_ipc(q2spi, q2spi->ipc, "sma format var5 data_buf",
			       (char *)q2spi_hc_var5->data_buf, q2spi_req.data_len);
		q2spi_kfree(q2spi, q2spi_req.data_buff, __LINE__);
		q2spi_req.data_buff = NULL;
	}
	if (q2spi_req.flow_id < Q2SPI_END_TID_ID)
		q2spi_hc_var5->flow = MC_FLOW;
	else
		q2spi_hc_var5->flow = CM_FLOW;
	q2spi_hc_var5->interrupt = CLIENT_INTERRUPT;
	q2spi_hc_var5->seg_last = SEGMENT_LST;
	q2spi_pkt->data_length = q2spi_req.data_len;
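	/*
	 * The variant-5 header splits the (DWORD count - 1) value across two
	 * fields: dw_len_part1 keeps the low bits (truncated to its bitfield
	 * width, presumably 2 bits given the >> 2 below) and dw_len_part2
	 * carries the remaining upper bits.
	 */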
	if (q2spi_req.data_len % 4) {
		q2spi_hc_var5->dw_len_part1 = (q2spi_req.data_len / 4);
		q2spi_hc_var5->dw_len_part2 = (q2spi_req.data_len / 4) >> 2;
	} else {
		q2spi_hc_var5->dw_len_part1 = (q2spi_req.data_len / 4) - 1;
		q2spi_hc_var5->dw_len_part2 = ((q2spi_req.data_len / 4) - 1) >> 2;
	}
	Q2SPI_DEBUG(q2spi, "%s dw_len_part1:%d dw_len_part2:%d\n",
		    __func__, q2spi_hc_var5->dw_len_part1, q2spi_hc_var5->dw_len_part2);
	q2spi_hc_var5->access_type = SYSTEM_MEMORY_ACCESS;
	q2spi_hc_var5->address_mode = NO_CLIENT_ADDRESS;
	if (!q2spi_req.flow_id && !q2spi->hrf_flow) {
		flow_id = q2spi_alloc_xfer_tid(q2spi);
		if (flow_id < 0) {
			Q2SPI_ERROR(q2spi, "%s Err failed to alloc tid", __func__);
			return -EINVAL;
		}
		q2spi_hc_var5->flow_id = flow_id;
	} else {
		if (q2spi_req.flow_id < Q2SPI_END_TID_ID)
			q2spi_hc_var5->flow_id = q2spi_pkt->flow_id;
		else
			q2spi_hc_var5->flow_id = q2spi_req.flow_id;
	}
	q2spi_pkt->xfer->tid = q2spi_hc_var5->flow_id;
	q2spi_pkt->var5_pkt = q2spi_hc_var5;
	q2spi_pkt->vtype = VARIANT_5;
	q2spi_pkt->valid = true;
	q2spi_pkt->sync = q2spi_req.sync;
	q2spi_pkt->flow_id = q2spi_hc_var5->flow_id;
	Q2SPI_DEBUG(q2spi, "%s flow id:%d q2spi_pkt:%p pkt_var1:%p pkt_tx_dma:%p var5_pkt:%p\n",
		    __func__, q2spi_hc_var5->flow_id, q2spi_pkt,
		    q2spi_pkt->var1_pkt, (void *)q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_pkt);
	q2spi_dump_ipc(q2spi, q2spi->ipc, "sma format var5(2) data_buf",
		       (char *)q2spi_hc_var5->data_buf, q2spi_req.data_len);
	return q2spi_hc_var5->flow_id;
}

static int q2spi_abort_command(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
			       struct q2spi_packet **q2spi_pkt_ptr)
{
	struct q2spi_host_abort_pkt *q2spi_abort_req;
	struct q2spi_packet *q2spi_pkt;

	if (!q2spi) {
		Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi\n", __func__);
		return -EINVAL;
	}
	Q2SPI_DEBUG(q2spi, "%s cmd:%d addr:%d flow_id:%d data_len:%d\n",
		    __func__, q2spi_req.cmd, q2spi_req.addr,
		    q2spi_req.flow_id, q2spi_req.data_len);
	q2spi_pkt = q2spi_alloc_q2spi_pkt(q2spi, __LINE__);
	if (!q2spi_pkt)
		return -ENOMEM;

	*q2spi_pkt_ptr = q2spi_pkt;
	q2spi_abort_req = q2spi_alloc_host_variant(q2spi, sizeof(struct q2spi_host_abort_pkt));
	if (!q2spi_abort_req) {
		Q2SPI_ERROR(q2spi, "%s Err alloc and map failed\n", __func__);
		return -EINVAL;
	}
	q2spi_abort_req->cmd = HC_ABORT;
	q2spi_abort_req->flow_id = q2spi_alloc_xfer_tid(q2spi);
	q2spi_pkt->xfer->tid = q2spi_abort_req->flow_id;
	q2spi_abort_req->code = 0;
	q2spi_pkt->abort_pkt = q2spi_abort_req;
	q2spi_pkt->vtype = VAR_ABORT;
	q2spi_pkt->m_cmd_param = Q2SPI_TX_ONLY;
	return q2spi_abort_req->flow_id;
}

static int q2spi_soft_reset(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
			    struct q2spi_packet **q2spi_pkt_ptr)
{
	struct q2spi_host_soft_reset_pkt *q2spi_softreset_req;
	struct q2spi_packet *q2spi_pkt;

	if (!q2spi) {
		Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi\n", __func__);
		return -EINVAL;
	}
	Q2SPI_DEBUG(q2spi, "%s cmd:%d addr:%d flow_id:%d data_len:%d\n",
		    __func__, q2spi_req.cmd, q2spi_req.addr,
		    q2spi_req.flow_id, q2spi_req.data_len);
	q2spi_pkt = q2spi_alloc_q2spi_pkt(q2spi, __LINE__);
	if (!q2spi_pkt)
		return -ENOMEM;

	*q2spi_pkt_ptr = q2spi_pkt;
	q2spi_softreset_req = q2spi_alloc_host_variant(q2spi,
						       sizeof(struct q2spi_host_soft_reset_pkt));
	if (!q2spi_softreset_req) {
		Q2SPI_ERROR(q2spi, "%s Err alloc and map failed\n", __func__);
		q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__);
		q2spi_pkt = NULL;
		return -EINVAL;
	}
	q2spi_softreset_req->cmd = HC_SOFT_RESET;
	q2spi_softreset_req->flags = HC_SOFT_RESET_FLAGS;
	q2spi_softreset_req->code = HC_SOFT_RESET_CODE;
	q2spi_pkt->soft_reset_pkt = q2spi_softreset_req;
	q2spi_pkt->soft_reset_tx_dma = q2spi->dma_buf;
	q2spi_pkt->vtype = VAR_SOFT_RESET;
	q2spi_pkt->m_cmd_param = Q2SPI_TX_ONLY;
	return 0;
}

void q2spi_notify_data_avail_for_client(struct q2spi_geni *q2spi)
{
	Q2SPI_DEBUG(q2spi, "%s wake userspace\n", __func__);
	atomic_inc(&q2spi->rx_avail);
	wake_up_interruptible(&q2spi->readq);
	wake_up(&q2spi->read_wq);
}

int q2spi_hrf_sleep(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
		    struct q2spi_packet **q2spi_pkt_ptr)
{
	struct q2spi_request *q2spi_hrf_req;
	struct q2spi_packet *q2spi_pkt = NULL;
	int ret = 0;

	ret = q2spi_hrf_entry_format_sleep(q2spi, q2spi_req, &q2spi_hrf_req);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_hrf_entry_format failed ret:%d\n", __func__, ret);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s hrf_req cmd:%d flow_id:%d data_buff:%p\n",
		    __func__, q2spi_hrf_req->cmd, q2spi_hrf_req->flow_id, q2spi_hrf_req->data_buff);
	ret = q2spi_frame_lra(q2spi, *q2spi_hrf_req, &q2spi_pkt, VARIANT_1_LRA);
	Q2SPI_DEBUG(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n",
		    __func__, q2spi_hrf_req, q2spi_pkt);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret);
		q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__);
		return ret;
	}
	list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list);
	q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__);
	*q2spi_pkt_ptr = q2spi_pkt;
	Q2SPI_DEBUG(q2spi, "%s End %d\n", __func__, __LINE__);
	return ret;
}

int q2spi_hrf_flow(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
		   struct q2spi_packet **q2spi_pkt_ptr)
{
	struct q2spi_request *q2spi_hrf_req;
	struct q2spi_packet *q2spi_pkt = NULL;
	int ret = 0;

	q2spi->hrf_flow = true;
	ret = q2spi_hrf_entry_format(q2spi, q2spi_req, &q2spi_hrf_req);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_hrf_entry_format failed ret:%d\n", __func__, ret);
		q2spi->hrf_flow = false;
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s cmd:%d flow_id:%d data_buff:%p\n",
		    __func__, q2spi_req.cmd, q2spi_req.flow_id, q2spi_req.data_buff);
	Q2SPI_DEBUG(q2spi, "%s addr:0x%x proto:0x%x data_len:0x%x\n",
		    __func__, q2spi_req.addr, q2spi_req.proto_ind, q2spi_req.data_len);
	ret = q2spi_frame_lra(q2spi, *q2spi_hrf_req, &q2spi_pkt, VARIANT_1_HRF);
	Q2SPI_DEBUG(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n",
		    __func__, q2spi_hrf_req, q2spi_pkt);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret);
		q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__);
		q2spi->hrf_flow = false;
		return ret;
	}
	q2spi_pkt->flow_id = ret;
	ret = q2spi_sma_format(q2spi, q2spi_req, q2spi_pkt);
	if (ret < 0) {
		Q2SPI_DEBUG(q2spi, "%s Err q2spi_sma_format failed ret:%d\n", __func__, ret);
		q2spi_unmap_var_bufs(q2spi, q2spi_pkt);
		q2spi_kfree(q2spi, q2spi_pkt, __LINE__);
		q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__);
		q2spi->hrf_flow = false;
		return ret;
	}
	list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list);
	q2spi_pkt->vtype = VARIANT_5_HRF;
	q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__);
	*q2spi_pkt_ptr = q2spi_pkt;
	q2spi->hrf_flow = false;
	Q2SPI_DEBUG(q2spi, "%s End q2spi_pkt:%p\n", __func__, q2spi_pkt);
	return ret;
}

void q2spi_print_req_cmd(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req)
{
	if (q2spi_req.cmd == LOCAL_REG_READ)
		Q2SPI_DEBUG(q2spi, "%s cmd:LOCAL_REG_READ\n", __func__);
	else if (q2spi_req.cmd == LOCAL_REG_WRITE)
		Q2SPI_DEBUG(q2spi, "%s cmd:LOCAL_REG_WRITE\n", __func__);
	else if (q2spi_req.cmd == HRF_READ)
		Q2SPI_DEBUG(q2spi, "%s cmd:HRF_READ\n", __func__);
	else if (q2spi_req.cmd == HRF_WRITE)
		Q2SPI_DEBUG(q2spi, "%s cmd:HRF_WRITE\n", __func__);
	else if (q2spi_req.cmd == DATA_READ)
		Q2SPI_DEBUG(q2spi, "%s cmd:DATA_READ\n", __func__);
	else if (q2spi_req.cmd == DATA_WRITE)
		Q2SPI_DEBUG(q2spi, "%s cmd:DATA_WRITE\n", __func__);
	else if (q2spi_req.cmd == SOFT_RESET)
		Q2SPI_DEBUG(q2spi, "%s cmd:SOFT_RESET\n", __func__);
	else if (q2spi_req.cmd == Q2SPI_HRF_SLEEP_CMD)
		Q2SPI_DEBUG(q2spi, "%s cmd:Sleep CMD to Client\n", __func__);
	else
		Q2SPI_DEBUG(q2spi, "%s Invalid cmd:%d\n", __func__, q2spi_req.cmd);
}

/*
 * q2spi_del_pkt_from_tx_queue - Delete a q2spi packet from tx_queue_list
 * @q2spi: pointer to q2spi_geni
 * @cur_q2spi_pkt: pointer to q2spi_packet
 *
 * This function iterates through the tx_queue_list looking for cur_q2spi_pkt
 * and deletes the packet from the list if its state is IN_DELETION.
 *
 * Return: true if the given packet is found in tx_queue_list and deleted, else false.
 */
bool q2spi_del_pkt_from_tx_queue(struct q2spi_geni *q2spi, struct q2spi_packet *cur_q2spi_pkt)
{
	struct q2spi_packet *q2spi_pkt, *q2spi_pkt_tmp;
	bool found = false;

	if (!cur_q2spi_pkt) {
		Q2SPI_DEBUG(q2spi, "%s q2spi_pkt NULL\n", __func__);
		q2spi_tx_queue_status(q2spi);
		return found;
	}
	mutex_lock(&q2spi->queue_lock);
	list_for_each_entry_safe(q2spi_pkt, q2spi_pkt_tmp, &q2spi->tx_queue_list, list) {
		if (cur_q2spi_pkt == q2spi_pkt) {
			Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt:%p state:%s\n", __func__,
				    q2spi_pkt, q2spi_pkt_state(q2spi_pkt));
			if (q2spi_pkt->state == IN_DELETION) {
				list_del(&q2spi_pkt->list);
				q2spi_pkt->state = DELETED;
				found = true;
				break;
			}
		}
		Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state:%s\n",
			    __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt));
	}
	mutex_unlock(&q2spi->queue_lock);
	if (!found)
		Q2SPI_DEBUG(q2spi, "%s Couldn't find q2spi_pkt:%p\n", __func__, cur_q2spi_pkt);
	q2spi_tx_queue_status(q2spi);
	return found;
}

/*
 * q2spi_add_req_to_tx_queue - Add a q2spi packet to tx_queue_list
 * @q2spi: pointer to q2spi_geni
 * @q2spi_req: q2spi request to be framed
 * @q2spi_pkt_ptr: pointer to q2spi_packet
 *
 * This function frames the Q2SPI host request based on the request type and
 * adds the packet to tx_queue_list.
 *
 * Return: non-negative flow id on success. Error code on failure.
 */
int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
			      struct q2spi_packet **q2spi_pkt_ptr)
{
	struct q2spi_packet *q2spi_pkt = NULL;
	int ret = -EINVAL;

	q2spi_tx_queue_status(q2spi);
	q2spi_print_req_cmd(q2spi, q2spi_req);
	if (q2spi_req.cmd == LOCAL_REG_READ || q2spi_req.cmd == LOCAL_REG_WRITE) {
		ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA);
		if (ret < 0) {
			Q2SPI_DEBUG(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n",
				    __func__, ret);
			return ret;
		}
		list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list);
	} else if (q2spi_req.cmd == DATA_READ || q2spi_req.cmd == DATA_WRITE) {
		q2spi_pkt = q2spi_alloc_q2spi_pkt(q2spi, __LINE__);
		if (!q2spi_pkt)
			return -ENOMEM;
		ret = q2spi_sma_format(q2spi, q2spi_req, q2spi_pkt);
		if (ret < 0) {
			Q2SPI_DEBUG(q2spi, "%s Err q2spi_sma_format failed ret:%d\n",
				    __func__, ret);
			q2spi_kfree(q2spi, q2spi_pkt, __LINE__);
			return ret;
		}
		Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state=%s ret:%d\n",
			    __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt), ret);
		list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list);
	} else if (q2spi_req.cmd == HRF_READ || q2spi_req.cmd == HRF_WRITE) {
		ret = q2spi_hrf_flow(q2spi, q2spi_req, &q2spi_pkt);
		if (ret < 0) {
			Q2SPI_DEBUG(q2spi, "%s Err q2spi_hrf_flow failed ret:%d\n", __func__, ret);
			return ret;
		}
	} else if (q2spi_req.cmd == ABORT) {
		ret = q2spi_abort_command(q2spi, q2spi_req, &q2spi_pkt);
		if (ret < 0) {
			Q2SPI_DEBUG(q2spi, "%s Err abort_command failed ret:%d\n", __func__, ret);
			return ret;
		}
		list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list);
	} else if (q2spi_req.cmd == SOFT_RESET) {
		ret = q2spi_soft_reset(q2spi, q2spi_req, &q2spi_pkt);
		if (ret < 0) {
			Q2SPI_DEBUG(q2spi, "%s Err soft_reset failed ret:%d\n", __func__, ret);
			return ret;
		}
		list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list);
	} else if (q2spi_req.cmd == Q2SPI_HRF_SLEEP_CMD) {
		q2spi_req.cmd = HRF_WRITE;
		ret = q2spi_hrf_sleep(q2spi, q2spi_req, &q2spi_pkt);
		if (ret < 0) {
			Q2SPI_DEBUG(q2spi, "%s Err q2spi_hrf_sleep failed ret:%d\n",
				    __func__, ret);
			return ret;
		}
	} else {
		Q2SPI_ERROR(q2spi, "%s Err cmd:%d\n", __func__, q2spi_req.cmd);
		return -EINVAL;
	}
	if (q2spi_pkt) {
		*q2spi_pkt_ptr = q2spi_pkt;
		Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p req_cmd:%d ret:%d\n",
			    __func__, q2spi_pkt, q2spi_req.cmd, ret);
	} else {
		Q2SPI_DEBUG(q2spi, "%s req_cmd:%d ret:%d\n", __func__, q2spi_req.cmd, ret);
	}
	return ret;
}

/*
 * q2spi_cmd_type_valid - checks if q2spi_request command type is supported
 *
 * @q2spi: Pointer to main q2spi_geni structure
 * @q2spi_req: pointer to q2spi request
 *
 * Return: true if q2spi request command is of valid type, else false
 */
bool q2spi_cmd_type_valid(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req)
{
	if (q2spi_req->cmd != LOCAL_REG_READ &&
	    q2spi_req->cmd != LOCAL_REG_WRITE &&
	    q2spi_req->cmd != DATA_READ &&
	    q2spi_req->cmd != DATA_WRITE &&
	    q2spi_req->cmd != HRF_READ &&
	    q2spi_req->cmd != HRF_WRITE &&
	    q2spi_req->cmd != SOFT_RESET &&
	    q2spi_req->cmd != ABORT) {
		Q2SPI_DEBUG(q2spi, "%s Err Invalid cmd type %d\n", __func__, q2spi_req->cmd);
		return false;
	}
	if (q2spi_req->cmd != SOFT_RESET && !q2spi_req->data_len) {
		Q2SPI_DEBUG(q2spi, "%s Invalid data len %d bytes\n", __func__, q2spi_req->data_len);
		return false;
	}
	return true;
}

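/*
 * q2spi_check_resp_avail_buff() below reports how many of the
 * Q2SPI_MAX_RESP_BUF response buffers are currently free;
 * q2spi_transfer_check() treats a count of 0 as "retry later" (-EAGAIN).
 */
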
static int q2spi_check_resp_avail_buff(struct q2spi_geni *q2spi)
{
	unsigned int i, count = 0;

	for (i = 0; i < Q2SPI_MAX_RESP_BUF; i++) {
		if (!q2spi->resp_buf_used[i])
			count++;
		else
			Q2SPI_DEBUG(q2spi, "%s resp buffer in use %p\n",
				    __func__, q2spi->resp_buf_used[i]);
	}
	return count;
}

/*
 * q2spi_wakeup_hw_from_sleep - wake up the slave hw and wait for the extension CR
 * @q2spi: pointer to q2spi_geni structure
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_wakeup_hw_from_sleep(struct q2spi_geni *q2spi)
{
	unsigned long xfer_timeout = 0;
	long timeout = 0;
	int ret = 0;

	xfer_timeout = msecs_to_jiffies(EXT_CR_TIMEOUT_MSECS);
	reinit_completion(&q2spi->wait_for_ext_cr);
	/* Send gpio wakeup signal on q2spi lines to hw */
	Q2SPI_DEBUG(q2spi, "%s Send wakeup_hw to wakeup client\n", __func__);
	ret = q2spi_wakeup_hw_through_gpio(q2spi);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_wakeup_hw_through_gpio\n", __func__);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s Waiting for Extended CR\n", __func__);
	timeout = wait_for_completion_interruptible_timeout(&q2spi->wait_for_ext_cr, xfer_timeout);
	if (timeout <= 0) {
		Q2SPI_ERROR(q2spi, "%s Err timeout %ld for Extended CR\n", __func__, timeout);
		if (timeout == -ERESTARTSYS) {
			q2spi_sys_restart = true;
			return -ERESTARTSYS;
		}
	} else {
		Q2SPI_DEBUG(q2spi, "%s Received Extended CR\n", __func__);
	}
	return ret;
}

/*
 * __q2spi_transfer - Queue the work to transfer the q2spi packet present in the tx queue
 * and wait for its completion
 * @q2spi: pointer to q2spi_geni structure
 * @q2spi_req: q2spi_request structure for this transfer
 * @q2spi_pkt: pointer to the q2spi packet being transferred
 * @len: transfer length of the q2spi request
 *
 * This function supports sync mode only: it queues the work to the processor and
 * waits for completion of the transfer.
 *
 * Return: length of data transferred on success. Error code for async mode or
 * on any failure.
 */
static int __q2spi_transfer(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
			    struct q2spi_packet *q2spi_pkt, size_t len)
{
	unsigned long xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET);
	long timeout = 0;
	int ret = 0;

	if (!q2spi_req.sync) {
		Q2SPI_ERROR(q2spi, "%s async mode not supported\n", __func__);
		return -EINVAL;
	}
	ret = __q2spi_send_messages(q2spi, (void *)q2spi_pkt);
	if (ret == -ETIMEDOUT) {
		return -ETIMEDOUT;
	} else if (ret) {
		Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p __q2spi_send_messages ret:%d\n",
			    __func__, q2spi_pkt, ret);
		/* return 0 to userspace to retry the transfer from application */
		return 0;
	}
	if (q2spi_pkt->vtype == VARIANT_5_HRF) {
		ret = q2spi_process_hrf_flow_after_lra(q2spi, q2spi_pkt);
		if (ret) {
			Q2SPI_ERROR(q2spi, "%s Err hrf_flow sma write fail ret %d\n",
				    __func__, ret);
			q2spi_unmap_var_bufs(q2spi, q2spi_pkt);
			return ret;
		}
	}
	if (q2spi_pkt->is_client_sleep_pkt) {
		Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p client sleep_cmd ret:%d\n",
			    __func__, q2spi_pkt, ret);
		return ret;
	}
	if (q2spi_req.cmd == HRF_WRITE) {
		/* HRF_WRITE */
		Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p waiting for bulk_wait\n", __func__, q2spi_pkt);
		timeout = wait_for_completion_interruptible_timeout(&q2spi_pkt->bulk_wait,
								    xfer_timeout);
		if (timeout <= 0) {
			Q2SPI_ERROR(q2spi, "%s q2spi_pkt:%p Err timeout %ld for bulk_wait\n",
				    __func__, q2spi_pkt, timeout);
			if (timeout == -ERESTARTSYS) {
				q2spi_sys_restart = true;
				return -ERESTARTSYS;
			}
			return -ETIMEDOUT;
		} else if (atomic_read(&q2spi->retry)) {
			atomic_dec(&q2spi->retry);
			Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p CR Doorbell Pending try again\n",
				    __func__, q2spi_pkt);
			if (atomic_read(&q2spi->doorbell_pending))
				usleep_range(5000, 10000);
			return 0;
		}
		Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p bulk_wait completed wait DB clear\n",
			    __func__, q2spi_pkt);
		timeout = wait_event_interruptible(q2spi->read_wq,
						   !atomic_read(&q2spi->doorbell_pending));
		if (timeout) {
			Q2SPI_DEBUG(q2spi, "%s: %p Err db pending interrupted\n",
				    __func__, q2spi_pkt);
			return 0;
		}
	} else if (q2spi_req.cmd == LOCAL_REG_READ) {
		if (copy_to_user(q2spi_req.data_buff, q2spi_pkt->xfer->rx_buf,
				 q2spi_req.data_len)) {
			Q2SPI_DEBUG(q2spi, "%s Err copy_to_user fail\n", __func__);
			return -EFAULT;
		}
		Q2SPI_DEBUG(q2spi, "%s ret data_len:%d\n", __func__, q2spi_req.data_len);
		return q2spi_req.data_len;
	}
	return len;
}

/*
 * q2spi_transfer_with_retries - queue the transfer to GSI and wait for completion, retrying
 * up to Q2SPI_MAX_TX_RETRIES times on transfer timeout
 * @q2spi: pointer to q2spi_geni structure
 * @q2spi_req: q2spi_request structure for this transfer
 * @cur_q2spi_pkt: pointer to q2spi packet
 * @len: transfer length of the q2spi request
 * @flow_id: transfer id of the q2spi transfer request
 * @user_buf: userspace data buffer, used to rebuild write requests on retry
 *
 * Return: sizeof(struct q2spi_request) on success. Error code on failure.
 */
static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
				       struct q2spi_packet *cur_q2spi_pkt, size_t len,
				       int flow_id, void *user_buf)
{
	void *data_buf = NULL;
	int i, ret = 0;

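	/*
	 * Retry policy, as implemented below: the first -ETIMEDOUT is treated
	 * as the client possibly being asleep, so a GPIO wakeup is attempted;
	 * on the second timeout a Q2SPI soft reset is issued (unless userspace
	 * opted out via reserved[0] bit 0). The timed-out packet is then freed
	 * and rebuilt from the saved user buffer before it is requeued.
	 */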
	for (i = 0; i <= Q2SPI_MAX_TX_RETRIES; i++) {
		ret = __q2spi_transfer(q2spi, q2spi_req, cur_q2spi_pkt, len);
		Q2SPI_DEBUG(q2spi, "%s flow_id:%d ret:%d\n", __func__, flow_id, ret);
		q2spi_free_xfer_tid(q2spi, flow_id);
		if (ret > 0 || i == Q2SPI_MAX_TX_RETRIES) {
			if (ret == len)
				goto transfer_exit;
			if (i == Q2SPI_MAX_TX_RETRIES && ret < 0) {
				/*
				 * Shouldn't reach here, retry of transfers failed,
				 * could be hw is in bad state.
				 */
				Q2SPI_DEBUG(q2spi, "%s %d retries failed, hw_state_is_bad\n",
					    __func__, i);
				q2spi->hw_state_is_bad = true;
				q2spi_dump_client_error_regs(q2spi);
			}
			pm_runtime_mark_last_busy(q2spi->dev);
			Q2SPI_DEBUG(q2spi, "%s PM put_autosuspend count:%d line:%d\n", __func__,
				    atomic_read(&q2spi->dev->power.usage_count), __LINE__);
			pm_runtime_put_autosuspend(q2spi->dev);
			Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend count:%d\n", __func__,
				    atomic_read(&q2spi->dev->power.usage_count));
			return ret;
		} else if (ret == -ERESTARTSYS) {
			Q2SPI_DEBUG(q2spi, "%s system is in restart\n", __func__);
			return ret;
		} else if (ret == -ETIMEDOUT) {
			/* Upon transfer failure retry here */
			Q2SPI_DEBUG(q2spi, "%s ret:%d retry_count:%d retrying cur_q2spi_pkt:%p\n",
				    __func__, ret, i + 1, cur_q2spi_pkt);
			if (i == 0) {
				ret = q2spi_wakeup_hw_from_sleep(q2spi);
				if (ret) {
					Q2SPI_ERROR(q2spi, "%s Err q2spi_wakeup_hw_from_sleep\n",
						    __func__);
					pm_runtime_mark_last_busy(q2spi->dev);
					pm_runtime_put_autosuspend(q2spi->dev);
					Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend cnt:%d\n",
						    __func__,
						    atomic_read(&q2spi->dev->power.usage_count));
					return ret;
				}
			}
			/* Should not perform SOFT RESET when UWB sets reserved[0] bit 0 */
			if (!(q2spi_req.reserved[0] & BIT(0)) && i == 1)
				q2spi_transfer_soft_reset(q2spi);
			cur_q2spi_pkt->state = IN_DELETION;
			q2spi_del_pkt_from_tx_queue(q2spi, cur_q2spi_pkt);
			q2spi_free_q2spi_pkt(cur_q2spi_pkt, __LINE__);
			/* Copy data from user buffer only for write request */
			if (q2spi_req.cmd == LOCAL_REG_WRITE || q2spi_req.cmd == DATA_WRITE ||
			    q2spi_req.cmd == HRF_WRITE) {
				data_buf = q2spi_kzalloc(q2spi, q2spi_req.data_len, __LINE__);
				if (!data_buf) {
					Q2SPI_DEBUG(q2spi, "%s Err buf2 alloc failed\n", __func__);
					pm_runtime_mark_last_busy(q2spi->dev);
					pm_runtime_put_autosuspend(q2spi->dev);
					Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend count:%d\n",
						    __func__,
						    atomic_read(&q2spi->dev->power.usage_count));
					return -ENOMEM;
				}
				if (copy_from_user(data_buf, user_buf, q2spi_req.data_len)) {
					Q2SPI_DEBUG(q2spi, "%s Err copy_from_user to buf2 failed\n",
						    __func__);
					q2spi_kfree(q2spi, data_buf, __LINE__);
					pm_runtime_mark_last_busy(q2spi->dev);
					pm_runtime_put_autosuspend(q2spi->dev);
					Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend count:%d\n",
						    __func__,
						    atomic_read(&q2spi->dev->power.usage_count));
					return -EFAULT;
				}
				q2spi_req.data_buff = data_buf;
			}
			mutex_lock(&q2spi->queue_lock);
			flow_id = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &cur_q2spi_pkt);
			mutex_unlock(&q2spi->queue_lock);
			if (flow_id < 0) {
				q2spi_kfree(q2spi, data_buf, __LINE__);
				Q2SPI_DEBUG(q2spi, "%s Err Failed to add tx req to queue ret:%d\n",
					    __func__, flow_id);
				return -ENOMEM;
			}
			Q2SPI_DEBUG(q2spi, "%s cur_q2spi_pkt=%p\n", __func__, cur_q2spi_pkt);
		} else {
			/* Upon SW error break here */
			break;
		}
	}

transfer_exit:
	cur_q2spi_pkt->state = IN_DELETION;
	q2spi_del_pkt_from_tx_queue(q2spi, cur_q2spi_pkt);
	q2spi_free_q2spi_pkt(cur_q2spi_pkt, __LINE__);
	pm_runtime_mark_last_busy(q2spi->dev);
	Q2SPI_DEBUG(q2spi, "%s PM put_autosuspend count:%d line:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count), __LINE__);
	pm_runtime_put_autosuspend(q2spi->dev);
	Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend count:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count));
	return ret;
}

/*
 * q2spi_transfer_soft_reset - Add a soft-reset request to the tx_queue list and submit
 * the q2spi transfer
 *
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Return: none
 */
void q2spi_transfer_soft_reset(struct q2spi_geni *q2spi)
{
	struct q2spi_packet *cur_q2spi_sr_pkt;
	struct q2spi_request soft_reset_request = {};
	int ret = 0;

	soft_reset_request.cmd = SOFT_RESET;
	soft_reset_request.sync = 1;
	mutex_lock(&q2spi->queue_lock);
	ret = q2spi_add_req_to_tx_queue(q2spi, soft_reset_request, &cur_q2spi_sr_pkt);
	mutex_unlock(&q2spi->queue_lock);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_add_req_to_tx_queue ret:%d\n", __func__, ret);
		return;
	}
	__q2spi_transfer(q2spi, soft_reset_request, cur_q2spi_sr_pkt, 0);
	cur_q2spi_sr_pkt->state = IN_DELETION;
	q2spi_del_pkt_from_tx_queue(q2spi, cur_q2spi_sr_pkt);
	q2spi_kfree(q2spi, cur_q2spi_sr_pkt->xfer, __LINE__);
	cur_q2spi_sr_pkt->xfer = NULL;
}

/*
 * q2spi_transfer_check - check that the inputs from user space are valid and populate
 * the q2spi_request passed in
 *
 * @q2spi: Pointer to main q2spi_geni structure
 * @q2spi_req: pointer to the q2spi request to be populated
 * @buf: data buffer pointer passed from user space, of type struct q2spi_request
 * @len: transfer length of the transaction
 *
 * Return: 0 if the user inputs are valid, else a Linux error code
 */
static int q2spi_transfer_check(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req,
				const char __user *buf, size_t len)
{
	if (q2spi_sys_restart)
		return -ERESTARTSYS;

	if (!q2spi)
		return -EINVAL;

	if (q2spi->port_release) {
		Q2SPI_DEBUG(q2spi, "%s Err Port in closed state, return\n", __func__);
		return -ENOENT;
	}
	if (q2spi->hw_state_is_bad) {
		Q2SPI_DEBUG(q2spi, "%s Err Retries failed, check HW state\n", __func__);
		return -EPIPE;
	}
	if (!q2spi_check_resp_avail_buff(q2spi)) {
		Q2SPI_DEBUG(q2spi, "%s Err Short of resp buffers\n", __func__);
		return -EAGAIN;
	}
	if (len != sizeof(struct q2spi_request)) {
		Q2SPI_DEBUG(q2spi, "%s Err Invalid length %zu Expected %zu\n",
			    __func__, len, sizeof(struct q2spi_request));
		return -EINVAL;
	}
	if (copy_from_user(q2spi_req, buf, sizeof(struct q2spi_request))) {
		Q2SPI_DEBUG(q2spi, "%s Err copy_from_user failed\n", __func__);
		return -EFAULT;
	}
	Q2SPI_DEBUG(q2spi, "%s cmd:%d data_len:%d addr:%d proto:%d ep:%d\n",
		    __func__, q2spi_req->cmd, q2spi_req->data_len, q2spi_req->addr,
		    q2spi_req->proto_ind, q2spi_req->end_point);
	Q2SPI_DEBUG(q2spi, "%s priority:%d flow_id:%d sync:%d\n",
		    __func__, q2spi_req->priority, q2spi_req->flow_id, q2spi_req->sync);
	if (!q2spi_cmd_type_valid(q2spi, q2spi_req))
		return -EINVAL;

	if (q2spi_req->addr > Q2SPI_SLAVE_END_ADDR) {
		Q2SPI_DEBUG(q2spi, "%s Err Invalid address:%x\n", __func__, q2spi_req->addr);
		return -EINVAL;
	}
	return 0;
}

/*
 * q2spi_transfer - write file operation
 * @filp: file pointer of the q2spi device
 * @buf: data buffer pointer passed from user space, of type struct q2spi_request
 * @len: transfer length of the transaction
 * @f_pos: file position pointer
 *
 * User space calls the write API to initiate a read/write transfer request from the
 * Q2SPI host.
 *
 * Return: length of data transferred on success. Error code in case of any failure.
 */
static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t len, loff_t *f_pos)
{
	struct q2spi_geni *q2spi;
	struct q2spi_request q2spi_req;
	struct q2spi_packet *cur_q2spi_pkt;
	void *data_buf = NULL, *user_buf = NULL;
	int ret, flow_id = 0;

	if (!filp || !buf || !len || !filp->private_data) {
		pr_err("%s Err Null pointer\n", __func__);
		return -EINVAL;
	}
	q2spi = filp->private_data;
	Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid);
	q2spi_wait_for_doorbell_setup_ready(q2spi);
	ret = q2spi_transfer_check(q2spi, &q2spi_req, buf, len);
	if (ret)
		return ret;

	if (q2spi_req.cmd == HRF_WRITE) {
		q2spi_req.addr = Q2SPI_HRF_PUSH_ADDRESS;
		q2spi_req.sync = 1;
		q2spi_req.priority = 1;
		q2spi_req.data_len += ((q2spi_req.data_len % DATA_WORD_LEN) ?
				       (DATA_WORD_LEN - (q2spi_req.data_len % DATA_WORD_LEN)) : 0);
	}
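	/*
	 * HRF writes are padded up to the next DATA_WORD_LEN multiple above so
	 * that the DWORD count encoded in the host command header covers the
	 * whole payload without a partial trailing word.
	 */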
	if (q2spi_req.cmd == LOCAL_REG_WRITE || q2spi_req.cmd == DATA_WRITE ||
	    q2spi_req.cmd == HRF_WRITE) {
		data_buf = q2spi_kzalloc(q2spi, q2spi_req.data_len, __LINE__);
		if (!data_buf) {
			Q2SPI_DEBUG(q2spi, "%s Err buffer alloc failed\n", __func__);
			return -ENOMEM;
		}
		if (copy_from_user(data_buf, q2spi_req.data_buff, q2spi_req.data_len)) {
			Q2SPI_DEBUG(q2spi, "%s Err copy_from_user failed\n", __func__);
			q2spi_kfree(q2spi, data_buf, __LINE__);
			return -EFAULT;
		}
		user_buf = q2spi_req.data_buff;
		q2spi_dump_ipc(q2spi, q2spi->ipc, "q2spi_transfer", (char *)data_buf,
			       q2spi_req.data_len);
		q2spi_req.data_buff = data_buf;
	}
	if (atomic_read(&q2spi->doorbell_pending)) {
		Q2SPI_DEBUG(q2spi, "%s CR Doorbell Pending\n", __func__);
		usleep_range(1000, 2000);
	}
	Q2SPI_DEBUG(q2spi, "%s PM get_sync count:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count));
	ret = pm_runtime_get_sync(q2spi->dev);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err for PM get\n", __func__);
		pm_runtime_put_noidle(q2spi->dev);
		pm_runtime_set_suspended(q2spi->dev);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count));
	mutex_lock(&q2spi->queue_lock);
	reinit_completion(&q2spi->sma_wr_comp);
	flow_id = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &cur_q2spi_pkt);
	mutex_unlock(&q2spi->queue_lock);
	if (flow_id < 0) {
		q2spi_kfree(q2spi, data_buf, __LINE__);
		Q2SPI_DEBUG(q2spi, "%s Err Failed to add tx request ret:%d\n", __func__, flow_id);
		pm_runtime_mark_last_busy(q2spi->dev);
		pm_runtime_put_autosuspend(q2spi->dev);
		Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend count:%d\n",
			    __func__, atomic_read(&q2spi->dev->power.usage_count));
		return -ENOMEM;
	}
	Q2SPI_DEBUG(q2spi, "%s flow_id:%d\n", __func__, flow_id);
	ret = q2spi_transfer_with_retries(q2spi, q2spi_req, cur_q2spi_pkt, len, flow_id, user_buf);
	Q2SPI_DEBUG(q2spi, "%s transfer_with_retries ret:%d\n", __func__, ret);
	return ret;
}

static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct q2spi_geni *q2spi;
	struct q2spi_client_request cr_request;
	struct q2spi_client_dma_pkt *q2spi_cr_var3;
	struct q2spi_packet *q2spi_pkt = NULL, *q2spi_pkt_tmp1, *q2spi_pkt_tmp2;
	int ret = 0;
	long timeout = 0;

	if (q2spi_sys_restart)
		return -ERESTARTSYS;

	if (!filp || !buf || !count || !filp->private_data) {
		pr_err("%s Err Null pointer\n", __func__);
		return -EINVAL;
	}
	q2spi = filp->private_data;
	Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid);
	if (q2spi->hw_state_is_bad) {
		Q2SPI_DEBUG(q2spi, "%s Err Retries failed, check HW state\n", __func__);
		return -EPIPE;
	}
	Q2SPI_DEBUG(q2spi, "%s PM get_sync count:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count));
	ret = pm_runtime_get_sync(q2spi->dev);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err for PM get\n", __func__);
		pm_runtime_put_noidle(q2spi->dev);
		pm_runtime_set_suspended(q2spi->dev);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count));
	q2spi_tx_queue_status(q2spi);
	if (copy_from_user(&cr_request, buf, sizeof(struct q2spi_client_request)) != 0) {
		Q2SPI_ERROR(q2spi, "%s Err copy from user failed PID=%d\n", __func__, current->pid);
		return -EFAULT;
	}
	Q2SPI_DEBUG(q2spi, "%s waiting on wait_event_interruptible\n", __func__);
	/* Wait for Rx data available with timeout */
	timeout = wait_event_interruptible_timeout(q2spi->read_wq, atomic_read(&q2spi->rx_avail),
						   msecs_to_jiffies(Q2SPI_RESPONSE_WAIT_TIMEOUT));
	if (timeout <= 0) {
		Q2SPI_DEBUG(q2spi, "%s Err wait interrupted timeout:%ld\n", __func__, timeout);
		return -ETIMEDOUT;
	}
	atomic_dec(&q2spi->rx_avail);
	Q2SPI_DEBUG(q2spi, "%s wait unblocked ret:%d\n", __func__, ret);
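	/*
	 * Pick the first queued packet whose CR processing marked it
	 * DATA_AVAIL; each q2spi_response() call consumes exactly one
	 * such packet and frees it once its data is copied to userspace.
	 */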
	mutex_lock(&q2spi->queue_lock);
	list_for_each_entry_safe(q2spi_pkt_tmp1, q2spi_pkt_tmp2, &q2spi->tx_queue_list, list) {
		if (q2spi_pkt_tmp1->state == DATA_AVAIL) {
			q2spi_pkt = q2spi_pkt_tmp1;
			Q2SPI_DEBUG(q2spi, "%s q2spi_pkt %p data avail for user\n",
				    __func__, q2spi_pkt);
			break;
		}
		Q2SPI_DEBUG(q2spi, "%s check q2spi_pkt %p state:%s\n",
			    __func__, q2spi_pkt_tmp1, q2spi_pkt_state(q2spi_pkt_tmp1));
	}
	mutex_unlock(&q2spi->queue_lock);
	if (!q2spi_pkt) {
		Q2SPI_ERROR(q2spi, "%s Err No q2spi_pkt available\n", __func__);
		return -EINVAL;
	}
	Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt = %p, cr_hdr_type:0x%x\n",
		    __func__, q2spi_pkt, q2spi_pkt->cr_hdr_type);
	if (q2spi_pkt->cr_hdr_type == CR_HDR_VAR3) {
		q2spi_cr_var3 = &q2spi_pkt->cr_var3;
		Q2SPI_DEBUG(q2spi, "q2spi_cr_var3 len_part1:%d len_part2:%d\n",
			    q2spi_cr_var3->dw_len_part1, q2spi_cr_var3->dw_len_part2);
		Q2SPI_DEBUG(q2spi, "q2spi_cr_var3 flow_id:%d arg1:0x%x arg2:0x%x arg3:0x%x\n",
			    q2spi_cr_var3->flow_id, q2spi_cr_var3->arg1, q2spi_cr_var3->arg2,
			    q2spi_cr_var3->arg3);
		/*
		 * In the doorbell case the tid is assigned by the client.
		 * q2spi sends this ID to userspace so that it can issue the
		 * HC with this flow id for the async case.
		 */
		cr_request.flow_id = q2spi_cr_var3->flow_id;
		cr_request.cmd = q2spi_pkt->cr_hdr.cmd;
		cr_request.data_len = q2spi_pkt->var3_data_len;
		cr_request.end_point = q2spi_cr_var3->arg2;
		cr_request.proto_ind = q2spi_cr_var3->arg3;
		Q2SPI_DEBUG(q2spi, "%s CR cmd:%d flow_id:%d len:%d ep:%d proto:%d status:%d\n",
			    __func__, cr_request.cmd, cr_request.flow_id, cr_request.data_len,
			    cr_request.end_point, cr_request.proto_ind, cr_request.status);
	} else if (q2spi_pkt->cr_hdr_type == CR_HDR_BULK) {
		Q2SPI_DEBUG(q2spi, "%s cr_request.flow_id:%d status:%d\n",
			    __func__, cr_request.flow_id, cr_request.status);
	} else {
		Q2SPI_ERROR(q2spi, "%s Err Unsupported CR Type\n", __func__);
		return -EINVAL;
	}
	Q2SPI_DEBUG(q2spi, "%s data_len:%d ep:%d proto:%d cmd:%d status:%d flow_id:%d\n",
		    __func__, cr_request.data_len, cr_request.end_point, cr_request.proto_ind,
		    cr_request.cmd, cr_request.status, cr_request.flow_id);
	if (!q2spi_pkt->xfer || !q2spi_pkt->xfer->rx_buf) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_pkt rx_buf is NULL\n", __func__);
		return -EAGAIN;
	}
	q2spi_dump_ipc(q2spi, q2spi->ipc, "q2spi_response",
		       (char *)q2spi_pkt->xfer->rx_buf, cr_request.data_len);
	ret = copy_to_user(buf, &cr_request, sizeof(struct q2spi_client_request));
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err copy_to_user failed ret:%d\n", __func__, ret);
		return -EAGAIN;
	}
	ret = copy_to_user(cr_request.data_buff,
			   (void *)q2spi_pkt->xfer->rx_buf, cr_request.data_len);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err copy_to_user data_buff failed ret:%d\n", __func__, ret);
		return -EAGAIN;
	}
	ret = (sizeof(struct q2spi_client_request) - ret);
	q2spi_tx_queue_status(q2spi);
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state:%s\n",
		    __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt));
	q2spi_unmap_rx_buf(q2spi_pkt);
	q2spi_pkt->state = IN_DELETION;
	if (q2spi_del_pkt_from_tx_queue(q2spi, q2spi_pkt))
		q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__);
	Q2SPI_DEBUG(q2spi, "%s PM put_autosuspend count:%d line:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count), __LINE__);
	pm_runtime_mark_last_busy(q2spi->dev);
	pm_runtime_put_autosuspend(q2spi->dev);
	Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend count:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count));
	Q2SPI_DEBUG(q2spi, "%s End ret:%d PID=%d\n", __func__, ret, current->pid);
	return ret;
}

static __poll_t q2spi_poll(struct file *filp, poll_table *wait)
{
	struct q2spi_geni *q2spi;
	__poll_t mask = 0;

	if (q2spi_sys_restart)
		return EPOLLERR;

	if (!filp || !filp->private_data) {
		pr_err("%s Err Null pointer\n", __func__);
		return EPOLLERR;
	}
	q2spi = filp->private_data;
	poll_wait(filp, &q2spi->readq, wait);
	Q2SPI_DEBUG(q2spi, "%s PID:%d\n", __func__, current->pid);
	if (atomic_read(&q2spi->rx_avail)) {
		mask = (POLLIN | POLLRDNORM);
		Q2SPI_DEBUG(q2spi, "%s RX data available\n", __func__);
	}
	return mask;
}

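/*
 * rx_avail is a counter rather than a flag: every doorbell CR that carries
 * client data increments it via q2spi_notify_data_avail_for_client(), and
 * every successful q2spi_response() read decrements it, so poll() keeps
 * reporting readable until all pending CRs are drained.
 */
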
/**
 * q2spi_flush_pending_crs - check any pending CRs to consume
 * @q2spi: pointer to q2spi_geni driver data
 *
 * Return: none
 */
static void q2spi_flush_pending_crs(struct q2spi_geni *q2spi)
{
	struct q2spi_packet *q2spi_pkt = NULL, *q2spi_pkt_tmp;

	Q2SPI_INFO(q2spi, "%s: PID=%d\n", __func__, current->pid);
	/* Delay to ensure any pending CRs in progress are consumed */
	usleep_range(10000, 20000);
	q2spi_tx_queue_status(q2spi);
	mutex_lock(&q2spi->queue_lock);
	list_for_each_entry_safe(q2spi_pkt, q2spi_pkt_tmp, &q2spi->tx_queue_list, list) {
		if (q2spi_pkt->state == DATA_AVAIL) {
			Q2SPI_DEBUG(q2spi, "%s q2spi_pkt %p data avail, force delete\n",
				    __func__, q2spi_pkt);
			q2spi_unmap_rx_buf(q2spi_pkt);
			q2spi_pkt->state = IN_DELETION;
			list_del(&q2spi_pkt->list);
			q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__);
		} else {
			Q2SPI_DEBUG(q2spi, "%s Check q2spi_pkt %p state:%s!!!\n",
				    __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt));
		}
	}
	mutex_unlock(&q2spi->queue_lock);
}

static int q2spi_release(struct inode *inode, struct file *filp)
{
	struct q2spi_geni *q2spi;
	int ret = 0;

	if (q2spi_sys_restart)
		return -ERESTARTSYS;

	if (!filp || !filp->private_data) {
		pr_err("%s Err close return\n", __func__);
		return -EINVAL;
	}
	q2spi = filp->private_data;
	Q2SPI_DEBUG(q2spi, "%s PID:%d allocs:%d\n",
		    __func__, current->pid, atomic_read(&q2spi->alloc_count));
	atomic_set(&q2spi->sma_wr_pending, 0);
	atomic_set(&q2spi->sma_rd_pending, 0);
	if (q2spi->hw_state_is_bad) {
		Q2SPI_DEBUG(q2spi, "%s Err check HW state\n", __func__);
		return -EPIPE;
	}
	ret = pm_runtime_get_sync(q2spi->dev);
	if (ret < 0)
		Q2SPI_ERROR(q2spi, "%s Err for PM get ret:%d\n", __func__, ret);
	q2spi->port_release = true;
	q2spi_flush_pending_crs(q2spi);
	q2spi->doorbell_setup = false;
	q2spi_geni_resources_off(q2spi);
	q2spi_tx_queue_status(q2spi);
	atomic_set(&q2spi->doorbell_pending, 0);
	ret = pm_runtime_put_sync_suspend(q2spi->dev);
	Q2SPI_DEBUG(q2spi, "%s PM put sync suspend ret:%d\n", __func__, ret);
	ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_shutdown);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s: Err failed to pinctrl state to gpio, ret:%d\n",
			    __func__, ret);
	}
	Q2SPI_DEBUG(q2spi, "%s End allocs:%d\n", __func__, atomic_read(&q2spi->alloc_count));
	return 0;
}

static const struct file_operations q2spi_fops = {
	.owner = THIS_MODULE,
	.open = q2spi_open,
	.write = q2spi_transfer,
	.read = q2spi_response,
	.poll = q2spi_poll,
	.release = q2spi_release,
};

static int q2spi_se_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi,
			    int *clk_idx, int *clk_div)
{
	unsigned long sclk_freq;
	unsigned long res_freq;
	struct geni_se *se = &q2spi->se;
	int ret = 0;

	ret = geni_se_clk_freq_match(&q2spi->se, (speed_hz * q2spi->oversampling), clk_idx,
				     &sclk_freq, false);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err Failed(%d) to find src clk for 0x%x\n",
			    __func__, ret, speed_hz);
		return ret;
	}
	*clk_div = DIV_ROUND_UP(sclk_freq, (q2spi->oversampling * speed_hz));
	if (!(*clk_div)) {
		Q2SPI_ERROR(q2spi, "%s Err sclk:%lu oversampling:%d speed:%u\n",
			    __func__, sclk_freq, q2spi->oversampling, speed_hz);
		return -EINVAL;
	}
	res_freq = (sclk_freq / (*clk_div));
	Q2SPI_DEBUG(q2spi, "%s req speed:%u resultant:%lu sclk:%lu, idx:%d, div:%d\n",
		    __func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
	ret = clk_set_rate(se->clk, sclk_freq);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err clk_set_rate failed %d\n", __func__, ret);
		return ret;
	}
	return 0;
}

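/*
 * Divider math example with hypothetical numbers: with oversampling 2 and a
 * requested speed of 25 MHz, geni_se_clk_freq_match() above searches for a
 * source clock of at least 50 MHz; a 100 MHz source then gives
 * clk_div = DIV_ROUND_UP(100 MHz, 2 * 25 MHz) = 2 and a resultant SE clock
 * of 50 MHz.
 */
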
/**
 * q2spi_set_clock - Q2SPI SE clock configuration
 * @q2spi: controller to process queue
 * @clk_hz: SE clock in Hz
 *
 * Set the serial clock and dividers required as per the
 * desired speed.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_set_clock(struct q2spi_geni *q2spi, unsigned long clk_hz)
{
	struct geni_se *se = &q2spi->se;
	u32 clk_sel;
	int idx, div;
	int ret;

	if (clk_hz == q2spi->cur_speed_hz)
		return 0;

	ret = q2spi_se_clk_cfg(clk_hz, q2spi, &idx, &div);
	if (ret) {
		Q2SPI_ERROR(q2spi, "Err setting clk to %lu: %d\n", clk_hz, ret);
		return ret;
	}
	/*
	 * Q2SPI core clock gets configured with the requested frequency
	 * or the frequency closer to the requested frequency.
	 * For that reason requested frequency is stored in the
	 * cur_speed_hz and referred in the consecutive transfer instead
	 * of calling clk_get_rate() API.
	 */
	q2spi->cur_speed_hz = clk_hz;
	clk_sel = idx & CLK_SEL_MSK;
	q2spi->m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
	writel(clk_sel, se->base + SE_GENI_CLK_SEL);
	writel(q2spi->m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
	Q2SPI_DEBUG(q2spi, "%s speed_hz:%u clk_sel:0x%x m_clk_cfg:0x%x div:%d\n",
		    __func__, q2spi->cur_speed_hz, clk_sel, q2spi->m_clk_cfg, div);
	return ret;
}

void q2spi_geni_se_dump_regs(struct q2spi_geni *q2spi)
{
	mutex_lock(&q2spi->geni_resource_lock);
	if (!q2spi->resources_on) {
		Q2SPI_DEBUG(q2spi, "%s: Err cannot dump, resources are off!!!\n", __func__);
		mutex_unlock(&q2spi->geni_resource_lock);
		return;
	}
	Q2SPI_ERROR(q2spi, "GENI_STATUS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_STATUS));
	Q2SPI_ERROR(q2spi, "SPI_TRANS_CFG: 0x%x\n", geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG));
	Q2SPI_ERROR(q2spi, "SE_GENI_IOS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_IOS));
	Q2SPI_ERROR(q2spi, "SE_GENI_M_CMD0: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_CMD0));
	Q2SPI_ERROR(q2spi, "GENI_M_CMD_CTRL_REG: 0x%x\n",
		    geni_read_reg(q2spi->base, SE_GENI_M_CMD_CTRL_REG));
	Q2SPI_ERROR(q2spi, "GENI_M_IRQ_STATUS: 0x%x\n",
		    geni_read_reg(q2spi->base, SE_GENI_M_IRQ_STATUS));
	Q2SPI_ERROR(q2spi, "GENI_M_IRQ_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_IRQ_EN));
	Q2SPI_ERROR(q2spi, "GENI_TX_FIFO_STATUS: 0x%x\n",
		    geni_read_reg(q2spi->base, SE_GENI_TX_FIFO_STATUS));
	Q2SPI_ERROR(q2spi, "GENI_RX_FIFO_STATUS: 0x%x\n",
		    geni_read_reg(q2spi->base, SE_GENI_RX_FIFO_STATUS));
	Q2SPI_ERROR(q2spi, "DMA_TX_PTR_L: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_PTR_L));
	Q2SPI_ERROR(q2spi, "DMA_TX_PTR_H: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_PTR_H));
	Q2SPI_ERROR(q2spi, "DMA_TX_ATTR: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_ATTR));
	Q2SPI_ERROR(q2spi, "DMA_TX_LEN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_LEN));
	Q2SPI_ERROR(q2spi, "DMA_TX_IRQ_STAT: 0x%x\n",
		    geni_read_reg(q2spi->base, SE_DMA_TX_IRQ_STAT));
	Q2SPI_ERROR(q2spi, "DMA_TX_LEN_IN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_LEN_IN));
	Q2SPI_ERROR(q2spi, "DMA_RX_PTR_L: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_PTR_L));
	Q2SPI_ERROR(q2spi, "DMA_RX_PTR_H: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_PTR_H));
	Q2SPI_ERROR(q2spi, "DMA_RX_ATTR: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_ATTR));
	Q2SPI_ERROR(q2spi, "DMA_RX_LEN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_LEN));
	Q2SPI_ERROR(q2spi, "DMA_RX_IRQ_STAT: 0x%x\n",
		    geni_read_reg(q2spi->base, SE_DMA_RX_IRQ_STAT));
	Q2SPI_ERROR(q2spi, "DMA_RX_LEN_IN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_LEN_IN));
	Q2SPI_ERROR(q2spi, "DMA_DEBUG_REG0: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_DEBUG_REG0));
	mutex_unlock(&q2spi->geni_resource_lock);
}
static irqreturn_t q2spi_geni_wakeup_isr(int irq, void *data)
{
	struct q2spi_geni *q2spi = data;

	Q2SPI_DEBUG(q2spi, "%s PID:%d\n", __func__, current->pid);
	schedule_work(&q2spi->q2spi_wakeup_work);
	return IRQ_HANDLED;
}

static irqreturn_t q2spi_geni_irq(int irq, void *data)
{
	struct q2spi_geni *q2spi = data;
	unsigned int m_irq_status;
	unsigned int s_irq_status;
	unsigned int dma_tx_status;
	unsigned int dma_rx_status;

	m_irq_status = geni_read_reg(q2spi->base, SE_GENI_M_IRQ_STATUS);
	s_irq_status = geni_read_reg(q2spi->base, SE_GENI_S_IRQ_STATUS);
	dma_tx_status = geni_read_reg(q2spi->base, SE_DMA_TX_IRQ_STAT);
	dma_rx_status = geni_read_reg(q2spi->base, SE_DMA_RX_IRQ_STAT);
	Q2SPI_DEBUG(q2spi, "%s sirq 0x%x mirq:0x%x dma_tx:0x%x dma_rx:0x%x\n",
		    __func__, s_irq_status, m_irq_status, dma_tx_status, dma_rx_status);
	geni_write_reg(m_irq_status, q2spi->base, SE_GENI_M_IRQ_CLEAR);
	geni_write_reg(s_irq_status, q2spi->base, SE_GENI_S_IRQ_CLEAR);
	geni_write_reg(dma_tx_status, q2spi->base, SE_DMA_TX_IRQ_CLR);
	geni_write_reg(dma_rx_status, q2spi->base, SE_DMA_RX_IRQ_CLR);
	return IRQ_HANDLED;
}
/*
 * q2spi_dump_client_error_regs - Dump Q2SPI slave error registers using LRA
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Read the Q2SPI_SLAVE_ERROR register for errors encountered in the slave.
 * Read the Q2SPI_HDR_ERROR register for errors encountered in header parsing.
 */
void q2spi_dump_client_error_regs(struct q2spi_geni *q2spi)
{
	int ret = 0;

	ret = q2spi_read_reg(q2spi, Q2SPI_SLAVE_ERROR);
	if (ret)
		Q2SPI_ERROR(q2spi, "Err SLAVE_ERROR Reg read failed: %d\n", ret);
	ret = q2spi_read_reg(q2spi, Q2SPI_HDR_ERROR);
	if (ret)
		Q2SPI_ERROR(q2spi, "Err HDR_ERROR Reg read failed: %d\n", ret);
}
static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt)
{
	struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
	struct q2spi_dma_transfer *xfer = q2spi_pkt->xfer;
	int ret = 0;

	Q2SPI_DEBUG(q2spi, "%s PID:%d q2spi:%p xfer:%p wait for gsi_lock 2\n",
		    __func__, current->pid, q2spi, xfer);
	mutex_lock(&q2spi->gsi_lock);
	Q2SPI_DEBUG(q2spi, "%s PID=%d acquired gsi_lock 2\n", __func__, current->pid);
	ret = q2spi_setup_gsi_xfer(q2spi_pkt);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_setup_gsi_xfer failed: %d\n", __func__, ret);
		atomic_set(&q2spi->sma_wr_pending, 0);
		atomic_set(&q2spi->doorbell_pending, 0);
		q2spi_geni_se_dump_regs(q2spi);
		gpi_dump_for_geni(q2spi->gsi->tx_c);
		goto unmap_buf;
	}
	Q2SPI_DEBUG(q2spi, "%s PID:%d waiting check_gsi_transfer_completion\n",
		    __func__, current->pid);
	ret = check_gsi_transfer_completion(q2spi);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s PID:%d Err completion timeout: %d\n",
			    __func__, current->pid, ret);
		atomic_set(&q2spi->sma_wr_pending, 0);
		atomic_set(&q2spi->doorbell_pending, 0);
		q2spi_geni_se_dump_regs(q2spi);
		dev_err(q2spi->dev, "%s Err dump gsi regs\n", __func__);
		gpi_dump_for_geni(q2spi->gsi->tx_c);
		goto unmap_buf;
	}
	Q2SPI_DEBUG(q2spi, "%s End PID:%d flow_id:%d tx_dma:%p rx_dma:%p, released gsi_lock 2\n",
		    __func__, current->pid, q2spi_pkt->xfer->tid, (void *)xfer->tx_dma,
		    (void *)xfer->rx_dma);
unmap_buf:
	mutex_unlock(&q2spi->gsi_lock);
	q2spi_unmap_dma_buf_used(q2spi, xfer->tx_dma, xfer->rx_dma);
	return ret;
}
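/*
 * Minimal usage sketch for q2spi_gsi_submit() (condensed from the real call
 * sites such as __q2spi_send_messages() below; not a verbatim excerpt):
 *
 *	ret = q2spi_prep_var1_request(q2spi, q2spi_pkt);
 *	if (!ret)
 *		ret = q2spi_gsi_submit(q2spi_pkt);
 *
 * Note that q2spi_gsi_submit() unmaps xfer->tx_dma/xfer->rx_dma on both the
 * success and the error path, so callers must not reuse those mappings.
 */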
/*
 * q2spi_prep_soft_reset_request - Prepare soft reset packet transfer
 * @q2spi: pointer to q2spi_geni
 * @q2spi_pkt: pointer to q2spi packet
 *
 * This function prepares the transfer for a soft reset packet to submit to GSI.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_prep_soft_reset_request(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt)
{
	struct q2spi_host_soft_reset_pkt *reset_pkt;
	struct q2spi_dma_transfer *reset_xfer = q2spi_pkt->xfer;

	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->soft_reset_pkt:%p &q2spi_pkt->soft_reset_pkt:%p\n",
		    __func__, q2spi_pkt->soft_reset_pkt, &q2spi_pkt->soft_reset_pkt);
	reset_xfer->cmd = q2spi_pkt->m_cmd_param;
	reset_pkt = q2spi_pkt->soft_reset_pkt;
	reset_xfer->tx_buf = reset_pkt;
	reset_xfer->tx_dma = q2spi_pkt->soft_reset_tx_dma;
	reset_xfer->tx_data_len = 0;
	reset_xfer->tx_len = Q2SPI_HEADER_LEN;
	Q2SPI_DEBUG(q2spi, "%s reset_xfer->tx_len:%d reset_xfer->tx_data_len:%d\n",
		    __func__, reset_xfer->tx_len, reset_xfer->tx_data_len);
	Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p\n", __func__,
		    reset_xfer->tx_buf, (void *)reset_xfer->tx_dma);
	q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing soft reset tx_buf DMA TX",
		       (char *)reset_xfer->tx_buf, reset_xfer->tx_len);
	return 0;
}
/*
 * q2spi_prep_var1_request - Prepare q2spi variant1 type packet transfer
 * @q2spi: pointer to q2spi_geni
 * @q2spi_pkt: pointer to q2spi packet
 *
 * This function prepares a variant1 type transfer request to submit to GSI.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_prep_var1_request(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt)
{
	struct q2spi_dma_transfer *var1_xfer = q2spi_pkt->xfer;

	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var1_pkt:%p\n", __func__, q2spi_pkt->var1_pkt);
	var1_xfer->cmd = q2spi_pkt->m_cmd_param;
	var1_xfer->tx_buf = q2spi_pkt->var1_pkt;
	var1_xfer->tx_dma = q2spi_pkt->var1_tx_dma;
	var1_xfer->tx_data_len = (q2spi_pkt->var1_pkt->dw_len * 4) + 4;
	var1_xfer->tx_len = Q2SPI_HEADER_LEN + var1_xfer->tx_data_len;
	Q2SPI_DEBUG(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n",
		    __func__, var1_xfer->tx_len, var1_xfer->tx_data_len);
	var1_xfer->tid = q2spi_pkt->var1_pkt->flow_id;
	if (q2spi_pkt->m_cmd_param == Q2SPI_TX_RX) {
		var1_xfer->tx_len = Q2SPI_HEADER_LEN;
		Q2SPI_DEBUG(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n",
			    __func__, var1_xfer->tx_len, var1_xfer->tx_data_len);
		var1_xfer->rx_buf = q2spi_pkt->xfer->rx_buf;
		var1_xfer->rx_dma = q2spi_pkt->xfer->rx_dma;
		q2spi_pkt->var1_rx_dma = var1_xfer->rx_dma;
		var1_xfer->rx_data_len = (q2spi_pkt->var1_pkt->dw_len * 4) + 4;
		var1_xfer->rx_len = var1_xfer->rx_data_len;
		Q2SPI_DEBUG(q2spi, "%s var1_xfer->rx_len:%d var1_xfer->rx_data_len:%d\n",
			    __func__, var1_xfer->rx_len, var1_xfer->rx_data_len);
	}
	Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", __func__,
		    var1_xfer->tx_buf, (void *)var1_xfer->tx_dma,
		    var1_xfer->rx_buf, (void *)var1_xfer->rx_dma);
	q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing var1 tx_buf DMA TX",
		       (char *)var1_xfer->tx_buf, var1_xfer->tx_len);
	return 0;
}
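/*
 * Length bookkeeping used by the q2spi_prep_var*_request() helpers: the
 * (dw_len * 4) + 4 arithmetic implies that dw_len counts DWORDs beyond the
 * first one. For example, dw_len = 3 gives (3 * 4) + 4 = 16 bytes of
 * payload, which matches the 16-byte tx_data_len hard-coded in
 * q2spi_prep_hrf_request() below.
 */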
/*
 * q2spi_prep_var5_request - Prepare q2spi variant5 type packet transfer
 * @q2spi: pointer to q2spi_geni
 * @q2spi_pkt: pointer to q2spi packet
 *
 * This function prepares a variant5 type transfer request to submit to GSI.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_prep_var5_request(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt)
{
	struct q2spi_dma_transfer *var5_xfer = q2spi_pkt->xfer;

	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var5_pkt:%p var5_tx_dma:%p\n",
		    __func__, q2spi_pkt->var5_pkt, (void *)q2spi_pkt->var5_tx_dma);
	var5_xfer->cmd = q2spi_pkt->m_cmd_param;
	var5_xfer->tx_buf = q2spi_pkt->var5_pkt;
	var5_xfer->tx_dma = q2spi_pkt->var5_tx_dma;
	var5_xfer->tid = q2spi_pkt->var5_pkt->flow_id;
	var5_xfer->tx_data_len = q2spi_pkt->data_length;
	var5_xfer->tx_len = Q2SPI_HEADER_LEN + var5_xfer->tx_data_len;
	Q2SPI_DEBUG(q2spi, "%s var5_xfer->tx_len:%d var5_xfer->tx_data_len:%d\n",
		    __func__, var5_xfer->tx_len, var5_xfer->tx_data_len);
	if (q2spi_pkt->m_cmd_param == Q2SPI_TX_RX) {
		var5_xfer->rx_buf = q2spi_pkt->xfer->rx_buf;
		var5_xfer->rx_dma = q2spi_pkt->xfer->rx_dma;
		q2spi_pkt->var5_rx_dma = var5_xfer->rx_dma;
		var5_xfer->tx_len = Q2SPI_HEADER_LEN;
		var5_xfer->rx_len =
			((q2spi_pkt->var5_pkt->dw_len_part1 |
			  q2spi_pkt->var5_pkt->dw_len_part2 << 2) * 4) + 4;
		var5_xfer->rx_data_len = q2spi_pkt->data_length;
		Q2SPI_DEBUG(q2spi, "%s var5_pkt:%p cmd:%d flow_id:0x%x len_part1:%d len_part2:%d\n",
			    __func__, q2spi_pkt->var5_pkt, q2spi_pkt->var5_pkt->cmd,
			    q2spi_pkt->var5_pkt->flow_id, q2spi_pkt->var5_pkt->dw_len_part1,
			    q2spi_pkt->var5_pkt->dw_len_part2);
		Q2SPI_DEBUG(q2spi, "%s var5_pkt data_buf:%p var5_xfer->rx_len:%d\n",
			    __func__, q2spi_pkt->var5_pkt->data_buf, var5_xfer->rx_len);
	}
	Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", __func__,
		    var5_xfer->tx_buf, (void *)var5_xfer->tx_dma,
		    var5_xfer->rx_buf, (void *)var5_xfer->rx_dma);
	q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing var5 tx_buf DMA TX",
		       (char *)var5_xfer->tx_buf, Q2SPI_HEADER_LEN);
	if (q2spi_pkt->m_cmd_param == Q2SPI_TX_ONLY)
		q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing var5 data_buf DMA TX",
			       (void *)q2spi_pkt->var5_pkt->data_buf, var5_xfer->tx_data_len);
	return 0;
}
/*
 * q2spi_prep_hrf_request - Prepare q2spi HRF type packet transfer
 * @q2spi: pointer to q2spi_geni
 * @q2spi_pkt: pointer to q2spi packet
 *
 * This function prepares an HRF type transfer request to submit to GSI.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_prep_hrf_request(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt)
{
	struct q2spi_dma_transfer *var1_xfer = q2spi_pkt->xfer;

	var1_xfer->cmd = Q2SPI_TX_ONLY;
	var1_xfer->tx_buf = q2spi_pkt->var1_pkt;
	var1_xfer->tx_dma = q2spi_pkt->var1_tx_dma;
	var1_xfer->tx_data_len = 16;
	var1_xfer->tx_len = Q2SPI_HEADER_LEN + var1_xfer->tx_data_len;
	var1_xfer->tid = q2spi_pkt->var1_pkt->flow_id;
	var1_xfer->rx_len = RX_DMA_CR_BUF_SIZE;
	Q2SPI_DEBUG(q2spi, "%s var1_pkt:%p var1_pkt_phy:%p cmd:%d addr:0x%x flow_id:0x%x\n",
		    __func__, q2spi_pkt->var1_pkt,
		    (void *)q2spi_pkt->var1_tx_dma, q2spi_pkt->var1_pkt->cmd,
		    q2spi_pkt->var1_pkt->reg_offset, q2spi_pkt->var1_pkt->flow_id);
	Q2SPI_DEBUG(q2spi, "%s var1_pkt: len:%d data_buf %p\n",
		    __func__, q2spi_pkt->var1_pkt->dw_len, q2spi_pkt->var1_pkt->data_buf);
	Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n",
		    __func__, var1_xfer->tx_buf, (void *)var1_xfer->tx_dma,
		    var1_xfer->rx_buf, (void *)var1_xfer->rx_dma);
	q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing var1_HRF DMA TX",
		       (char *)var1_xfer->tx_buf, var1_xfer->tx_len);
	return 0;
}
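/*
 * q2spi_process_hrf_flow_after_lra - continue the HRF flow once the doorbell CR arrives
 * @q2spi: pointer to q2spi_geni
 * @q2spi_pkt: pointer to q2spi packet
 *
 * Waits for the doorbell CR matching this packet and, if the received
 * cr_var3 flow_id matches the packet's flow_id, reuses the packet as a
 * variant5 transfer and submits it to GSI.
 *
 * Return: 0 on success. Error code on failure.
 */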
int q2spi_process_hrf_flow_after_lra(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt)
{
	unsigned long xfer_timeout = 0;
	long timeout = 0;
	int ret = -1;

	Q2SPI_DEBUG(q2spi, "%s VAR1 wait for doorbell\n", __func__);
	/* Make sure we get the doorbell before continuing with the HRF flow */
	xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET);
	timeout = wait_for_completion_interruptible_timeout(&q2spi_pkt->wait_for_db, xfer_timeout);
	if (timeout <= 0) {
		Q2SPI_ERROR(q2spi, "%s Err timeout for doorbell_wait timeout:%ld\n",
			    __func__, timeout);
		if (timeout == -ERESTARTSYS) {
			q2spi_sys_restart = true;
			return -ERESTARTSYS;
		}
		return -ETIMEDOUT;
	}
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p flow_id:%d cr_flow_id:%d\n", __func__,
		    q2spi_pkt, q2spi_pkt->flow_id, q2spi_pkt->cr_var3.flow_id);
	if (q2spi_pkt->flow_id == q2spi_pkt->cr_var3.flow_id) {
		q2spi_pkt->vtype = VARIANT_5;
		ret = q2spi_prep_var5_request(q2spi, q2spi_pkt);
		if (ret)
			return ret;
		ret = q2spi_gsi_submit(q2spi_pkt);
		if (ret) {
			Q2SPI_ERROR(q2spi, "%s Err q2spi_gsi_submit failed: %d\n", __func__, ret);
			return ret;
		}
		Q2SPI_DEBUG(q2spi, "%s wakeup sma_wr_comp\n", __func__);
		complete_all(&q2spi->sma_wr_comp);
		atomic_set(&q2spi->sma_wr_pending, 0);
	} else {
		Q2SPI_DEBUG(q2spi, "%s Err q2spi_pkt:%p flow_id:%d != cr_flow_id:%d\n",
			    __func__, q2spi_pkt, q2spi_pkt->flow_id, q2spi_pkt->cr_var3.flow_id);
	}
	return ret;
}
/**
 * __q2spi_send_messages - function which processes the q2spi message queue
 * @q2spi: pointer to q2spi_geni
 * @ptr: pointer to the q2spi packet to transfer, as queued in the Tx queue
 *
 * This function checks if there is any message in the queue that
 * needs processing and, if so, calls out to the driver to initialize
 * the hardware and transfer each message.
 *
 * Return: 0 on success, else error code
 */
int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr)
{
	struct q2spi_packet *q2spi_pkt = NULL, *q2spi_pkt_tmp1, *q2spi_pkt_tmp2;
	int ret = 0;
	bool cm_flow_pkt = false;

	if (ptr)
		Q2SPI_DEBUG(q2spi, "Enter %s for %p\n", __func__, ptr);
	else
		Q2SPI_DEBUG(q2spi, "Enter %s PID %d\n", __func__, current->pid);
	mutex_lock(&q2spi->send_msgs_lock);
	/* Check if the queue is idle */
	if (list_empty(&q2spi->tx_queue_list)) {
		Q2SPI_DEBUG(q2spi, "%s Tx queue list is empty\n", __func__);
		goto send_msg_exit;
	}
	/*
	 * Take the queue lock and pick the Q2SPI packet to frame; if one is
	 * found, the transfer is submitted to GSI further below.
	 */
	mutex_lock(&q2spi->queue_lock);
	list_for_each_entry_safe(q2spi_pkt_tmp1, q2spi_pkt_tmp2, &q2spi->tx_queue_list, list) {
		if (q2spi_pkt_tmp1->state == NOT_IN_USE &&
		    q2spi_pkt_tmp1 == (struct q2spi_packet *)ptr) {
			q2spi_pkt = q2spi_pkt_tmp1;
			Q2SPI_DEBUG(q2spi, "%s q2spi_pkt %p state:%s\n",
				    __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt));
			break;
		}
		Q2SPI_DEBUG(q2spi, "%s check q2spi_pkt %p state:%s\n",
			    __func__, q2spi_pkt_tmp1, q2spi_pkt_state(q2spi_pkt_tmp1));
	}
	mutex_unlock(&q2spi->queue_lock);
	if (!q2spi_pkt) {
		Q2SPI_DEBUG(q2spi, "%s Err couldn't find free q2spi pkt in tx queue!!!\n",
			    __func__);
		goto send_msg_exit;
	}
	q2spi_pkt->state = IN_USE;
	Q2SPI_DEBUG(q2spi, "%s send q2spi_pkt %p\n", __func__, q2spi_pkt);
	if (q2spi_pkt->vtype == VARIANT_1_LRA || q2spi_pkt->vtype == VARIANT_1_HRF)
		ret = q2spi_prep_var1_request(q2spi, q2spi_pkt);
	else if (q2spi_pkt->vtype == VARIANT_5)
		ret = q2spi_prep_var5_request(q2spi, q2spi_pkt);
	else if (q2spi_pkt->vtype == VARIANT_5_HRF)
		ret = q2spi_prep_hrf_request(q2spi, q2spi_pkt);
	else if (q2spi_pkt->vtype == VAR_SOFT_RESET)
		ret = q2spi_prep_soft_reset_request(q2spi, q2spi_pkt);
	if (ret)
		goto send_msg_exit;
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->vtype=%d cr_hdr_type=%d\n",
		    __func__, q2spi_pkt->vtype, q2spi_pkt->cr_hdr_type);
	if (q2spi_pkt->vtype == VARIANT_5) {
		if (q2spi_pkt->var5_pkt->flow_id >= Q2SPI_END_TID_ID) {
			cm_flow_pkt = true;
			Q2SPI_DEBUG(q2spi, "%s flow_id:%d\n", __func__,
				    q2spi_pkt->var5_pkt->flow_id);
		}
	}
	if (!cm_flow_pkt && atomic_read(&q2spi->doorbell_pending)) {
		atomic_inc(&q2spi->retry);
		Q2SPI_DEBUG(q2spi, "%s doorbell pending retry\n", __func__);
		complete_all(&q2spi_pkt->bulk_wait);
		q2spi_unmap_var_bufs(q2spi, q2spi_pkt);
		ret = -EAGAIN;
		goto send_msg_exit;
	}
	ret = q2spi_gsi_submit(q2spi_pkt);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_gsi_submit failed: %d\n", __func__, ret);
		q2spi_unmap_var_bufs(q2spi, q2spi_pkt);
		goto send_msg_exit;
	}
	if (q2spi_pkt->vtype == VARIANT_5) {
		Q2SPI_DEBUG(q2spi, "%s wakeup sma_wait\n", __func__);
		complete_all(&q2spi->sma_wait);
		Q2SPI_DEBUG(q2spi, "%s wakeup sma_rd_comp\n", __func__);
		complete_all(&q2spi->sma_rd_comp);
		atomic_set(&q2spi->sma_rd_pending, 0);
	}
send_msg_exit:
	mutex_unlock(&q2spi->send_msgs_lock);
	if (atomic_read(&q2spi->sma_rd_pending))
		atomic_set(&q2spi->sma_rd_pending, 0);
	Q2SPI_DEBUG(q2spi, "%s: line:%d End\n", __func__, __LINE__);
	return ret;
}
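/*
 * Sketch of how a request typically reaches __q2spi_send_messages()
 * (condensed from q2spi_send_system_mem_access() below): the packet is
 * queued under queue_lock first and then passed in via @ptr.
 *
 *	mutex_lock(&q2spi->queue_lock);
 *	ret = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &q2spi_pkt);
 *	mutex_unlock(&q2spi->queue_lock);
 *	if (ret >= 0)
 *		ret = __q2spi_send_messages(q2spi, (void *)q2spi_pkt);
 */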
/**
 * q2spi_send_messages - kthread work function which processes the q2spi message queue
 * @work: pointer to the kthread work struct contained in the controller struct
 */
static void q2spi_send_messages(struct kthread_work *work)
{
	struct q2spi_geni *q2spi = container_of(work, struct q2spi_geni, send_messages);
	int ret = 0;

	ret = __q2spi_send_messages(q2spi, NULL);
	if (ret)
		Q2SPI_DEBUG(q2spi, "%s Err send message failure ret=%d\n", __func__, ret);
}
/**
 * q2spi_proto_init - Q2SPI protocol specific initialization
 * @q2spi: pointer to q2spi_geni driver data
 *
 * This function adds the Q2SPI protocol specific configuration for
 * CS-less mode.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_proto_init(struct q2spi_geni *q2spi)
{
	u32 q2spi_tx_cfg = geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG);
	u32 io3_sel = geni_read_reg(q2spi->base, GENI_CFG_REG80);
	u32 pre_post_dly = geni_read_reg(q2spi->base, SE_SPI_PRE_POST_CMD_DLY);
	u32 word_len = geni_read_reg(q2spi->base, SE_SPI_WORD_LEN);
	u32 spi_delay_reg = geni_read_reg(q2spi->base, SPI_DELAYS_COUNTERS);
	u32 se_geni_cfg_95 = geni_read_reg(q2spi->base, SE_GENI_CFG_REG95);
	u32 se_geni_cfg_103 = geni_read_reg(q2spi->base, SE_GENI_CFG_REG103);
	u32 se_geni_cfg_104 = geni_read_reg(q2spi->base, SE_GENI_CFG_REG104);
	int ret = 0;

	/* 3.2.2.10.1 Q2SPI Protocol Specific Configuration */
	/* Configure SE CLK */
	ret = q2spi_set_clock(q2spi, q2spi->max_speed_hz);
	if (ret) {
		Q2SPI_DEBUG(q2spi, "%s Err set clock failed\n", __func__);
		return ret;
	}
	q2spi_tx_cfg &= ~SPI_NOT_USED_CFG1;
	geni_write_reg(q2spi_tx_cfg, q2spi->base, SE_SPI_TRANS_CFG);
	io3_sel &= ~IO_MACRO_IO3_SEL;
	geni_write_reg(io3_sel, q2spi->base, GENI_CFG_REG80);
	spi_delay_reg |= (SPI_CS_CLK_DLY << M_GP_CNT5_TE2D_SHIFT) & M_GP_CNT5_TE2D;
	spi_delay_reg |= (SPI_PIPE_DLY_TPM << M_GP_CNT6_CN_SHIFT) & M_GP_CNT6_CN;
	spi_delay_reg |= SPI_INTER_WORDS_DLY & M_GP_CNT4_TAN;
	geni_write_reg(spi_delay_reg, q2spi->base, SPI_DELAYS_COUNTERS);
	se_geni_cfg_95 |= M_GP_CNT7_TSN & M_GP_CNT7;
	geni_write_reg(se_geni_cfg_95, q2spi->base, SE_GENI_CFG_REG95);
	Q2SPI_DEBUG(q2spi, "tx_cfg: 0x%x io3_sel:0x%x spi_delay: 0x%x cfg_95:0x%x\n",
		    geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG),
		    geni_read_reg(q2spi->base, GENI_CFG_REG80),
		    geni_read_reg(q2spi->base, SPI_DELAYS_COUNTERS),
		    geni_read_reg(q2spi->base, SE_GENI_CFG_REG95));
	se_geni_cfg_103 |= (S_GP_CNT5_TDN << S_GP_CNT5_SHIFT) & S_GP_CNT5;
	se_geni_cfg_104 |= S_GP_CNT7_SSN & S_GP_CNT7;
	geni_write_reg(se_geni_cfg_103, q2spi->base, SE_GENI_CFG_REG103);
	geni_write_reg(se_geni_cfg_104, q2spi->base, SE_GENI_CFG_REG104);
	word_len &= ~WORD_LEN_MSK;
	word_len |= MIN_WORD_LEN & WORD_LEN_MSK;
	geni_write_reg(word_len, q2spi->base, SE_SPI_WORD_LEN);
	Q2SPI_DEBUG(q2spi, "cfg_103: 0x%x cfg_104:0x%x pre_post_dly:0x%x spi_word_len:0x%x\n",
		    geni_read_reg(q2spi->base, SE_GENI_CFG_REG103),
		    geni_read_reg(q2spi->base, SE_GENI_CFG_REG104),
		    pre_post_dly, geni_read_reg(q2spi->base, SE_SPI_WORD_LEN));
	io3_sel &= ~OTHER_IO_OE;
	io3_sel |= (IO_MACRO_IO3_DATA_IN_SEL << IO_MACRO_IO3_DATA_IN_SEL_SHIFT) &
		   IO_MACRO_IO3_DATA_IN_SEL_MASK;
	Q2SPI_DEBUG(q2spi, "io3_sel:0x%x %lx TPM:0x%x %d\n", io3_sel,
		    (IO_MACRO_IO3_DATA_IN_SEL & IO_MACRO_IO3_DATA_IN_SEL_MASK),
		    SPI_PIPE_DLY_TPM, SPI_PIPE_DLY_TPM << M_GP_CNT6_CN_SHIFT);
	return 0;
}
/**
 * q2spi_geni_init - QUPv3 and SE initialization
 * @q2spi: pointer to q2spi_geni driver data
 *
 * This is done once per session. Make sure this API is called
 * before any actual transfer begins, as it involves the generic
 * SW/HW and Q2SPI protocol specific initializations required
 * for a q2spi transfer.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_geni_init(struct q2spi_geni *q2spi)
{
	int proto = 0;
	unsigned int major, minor;
	int ver = 0, ret = 0;

	/* make sure to turn on the resources before this ex: pm_runtime_get_sync(q2spi->dev); */
	proto = geni_se_read_proto(&q2spi->se);
	if (proto != GENI_SE_Q2SPI) {
		Q2SPI_ERROR(q2spi, "Err Invalid proto %d\n", proto);
		return -EINVAL;
	}
	ver = geni_se_get_qup_hw_version(&q2spi->se);
	major = GENI_SE_VERSION_MAJOR(ver);
	minor = GENI_SE_VERSION_MINOR(ver);
	Q2SPI_DEBUG(q2spi, "%s ver:0x%x major:%d minor:%d\n", __func__, ver, major, minor);
	if (major == 1 && minor == 0)
		q2spi->oversampling = 2;
	else
		q2spi->oversampling = 1;
	/* QUPv3 Q2SPI protocol specific initialization */
	ret = q2spi_proto_init(q2spi);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_proto_init failed: %d\n", __func__, ret);
		return ret;
	}
	q2spi->gsi_mode = (geni_read_reg(q2spi->base, GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE);
	if (q2spi->gsi_mode) {
		q2spi->xfer_mode = GENI_GPI_DMA;
		geni_se_select_mode(&q2spi->se, GENI_GPI_DMA);
		ret = q2spi_geni_gsi_setup(q2spi);
	} else {
		Q2SPI_DEBUG(q2spi, "%s: Err GSI mode not supported!\n", __func__);
		return -EINVAL;
	}
	Q2SPI_DEBUG(q2spi, "%s gsi_mode:%d xfer_mode:%d ret:%d\n",
		    __func__, q2spi->gsi_mode, q2spi->xfer_mode, ret);
	return ret;
}
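/*
 * Expected bring-up ordering around q2spi_geni_init(), per the "turn on the
 * resources before this" note above (an illustrative sketch, not a verbatim
 * call site):
 *
 *	ret = pm_runtime_get_sync(q2spi->dev);
 *	if (ret >= 0)
 *		ret = q2spi_geni_init(q2spi);
 */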
/**
 * q2spi_geni_resources_off - turns off geni resources
 * @q2spi: pointer to q2spi_geni driver data
 *
 * Return: none
 */
void q2spi_geni_resources_off(struct q2spi_geni *q2spi)
{
	struct geni_se *se = NULL;
	int ret = 0;

	if (q2spi_sys_restart)
		return;
	se = &q2spi->se;
	mutex_lock(&q2spi->geni_resource_lock);
	if (!q2spi->resources_on) {
		Q2SPI_DEBUG(q2spi, "%s: Err Resources already off\n", __func__);
		goto exit_resource_off;
	}
	q2spi->resources_on = false;
	writel(0x1, se->base + GENI_SER_M_CLK_CFG);
	/* Set pinctrl state to sleep configuration */
	if (!IS_ERR_OR_NULL(q2spi->geni_gpio_sleep)) {
		ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_sleep);
		if (ret)
			Q2SPI_DEBUG(q2spi, "%s: Err failed to set pinctrl state to sleep, ret:%d\n",
				    __func__, ret);
	}
	/* Disable m_ahb, s_ahb and se clks */
	geni_se_common_clks_off(q2spi->se.clk, q2spi->m_ahb_clk, q2spi->s_ahb_clk);
	/* Disable icc */
	ret = geni_icc_disable(&q2spi->se);
	if (ret)
		Q2SPI_DEBUG(q2spi, "%s: Err icc disable failed, ret:%d\n", __func__, ret);
exit_resource_off:
	mutex_unlock(&q2spi->geni_resource_lock);
	Q2SPI_DEBUG(q2spi, "%s: ret:%d\n", __func__, ret);
}
/**
 * q2spi_geni_resources_on - turns on geni resources
 * @q2spi: pointer to q2spi_geni driver data
 *
 * Return: 0 on success. Error code on failure.
 */
int q2spi_geni_resources_on(struct q2spi_geni *q2spi)
{
	struct geni_se *se = &q2spi->se;
	int ret = 0;

	mutex_lock(&q2spi->geni_resource_lock);
	if (q2spi->resources_on) {
		Q2SPI_DEBUG(q2spi, "%s: Err Resources already on\n", __func__);
		goto exit_resource_on;
	}
	ret = geni_icc_enable(&q2spi->se);
	if (ret) {
		Q2SPI_DEBUG(q2spi, "%s: Err icc enable failed, ret:%d\n", __func__, ret);
		goto exit_resource_on;
	}
	ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_active);
	if (ret) {
		Q2SPI_DEBUG(q2spi, "%s: Err failed to set pinctrl state to active, ret:%d\n",
			    __func__, ret);
		goto exit_resource_on;
	}
	/* Enable m_ahb, s_ahb and se clks */
	ret = geni_se_common_clks_on(q2spi->se.clk, q2spi->m_ahb_clk, q2spi->s_ahb_clk);
	if (ret) {
		Q2SPI_DEBUG(q2spi, "%s: Err set common_clk_on failed, ret:%d\n", __func__, ret);
		goto exit_resource_on;
	}
	writel(q2spi->m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
	q2spi->resources_on = true;
exit_resource_on:
	mutex_unlock(&q2spi->geni_resource_lock);
	Q2SPI_DEBUG(q2spi, "%s: ret:%d\n", __func__, ret);
	return ret;
}
/**
 * q2spi_get_icc_pinctrl - Enable ICC voting and pinctrl
 * @pdev: pointer to Platform device
 * @q2spi: pointer to q2spi_geni driver data
 *
 * This function enables the ICC paths, adds the bandwidth votes,
 * and gets the pinctrl states from the DTSI.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_get_icc_pinctrl(struct platform_device *pdev,
				 struct q2spi_geni *q2spi)
{
	struct geni_se *q2spi_rsc;
	int ret = 0;

	q2spi_rsc = &q2spi->se;
	/* ICC get */
	ret = geni_se_common_resources_init(q2spi_rsc,
					    Q2SPI_CORE2X_VOTE, APPS_PROC_TO_QUP_VOTE,
					    (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
	if (ret) {
		Q2SPI_DEBUG(q2spi, "Error geni_se_common_resources_init\n");
		goto get_icc_pinctrl_err;
	}
	Q2SPI_DEBUG(q2spi, "%s GENI_TO_CORE:%d CPU_TO_GENI:%d GENI_TO_DDR:%d\n",
		    __func__, q2spi_rsc->icc_paths[GENI_TO_CORE].avg_bw,
		    q2spi_rsc->icc_paths[CPU_TO_GENI].avg_bw,
		    q2spi_rsc->icc_paths[GENI_TO_DDR].avg_bw);
	/* call set_bw once, then do icc_enable/disable */
	ret = geni_icc_set_bw(q2spi_rsc);
	if (ret) {
		Q2SPI_DEBUG(q2spi, "%s icc set bw failed ret:%d\n", __func__, ret);
		goto get_icc_pinctrl_err;
	}
	q2spi->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR_OR_NULL(q2spi->geni_pinctrl)) {
		Q2SPI_DEBUG(q2spi, "No pinctrl config specified!\n");
		ret = PTR_ERR(q2spi->geni_pinctrl);
		goto get_icc_pinctrl_err;
	}
	q2spi->geni_gpio_default = pinctrl_lookup_state(q2spi->geni_pinctrl, PINCTRL_DEFAULT);
	if (IS_ERR_OR_NULL(q2spi->geni_gpio_default)) {
		Q2SPI_DEBUG(q2spi, "No default config specified!\n");
		ret = PTR_ERR(q2spi->geni_gpio_default);
		goto get_icc_pinctrl_err;
	}
	q2spi->geni_gpio_active = pinctrl_lookup_state(q2spi->geni_pinctrl, PINCTRL_ACTIVE);
	if (IS_ERR_OR_NULL(q2spi->geni_gpio_active)) {
		Q2SPI_DEBUG(q2spi, "No active config specified!\n");
		ret = PTR_ERR(q2spi->geni_gpio_active);
		goto get_icc_pinctrl_err;
	}
	q2spi->geni_gpio_sleep = pinctrl_lookup_state(q2spi->geni_pinctrl, PINCTRL_SLEEP);
	if (IS_ERR_OR_NULL(q2spi->geni_gpio_sleep)) {
		Q2SPI_DEBUG(q2spi, "No sleep config specified!\n");
		ret = PTR_ERR(q2spi->geni_gpio_sleep);
		goto get_icc_pinctrl_err;
	}
	q2spi->geni_gpio_shutdown = pinctrl_lookup_state(q2spi->geni_pinctrl, PINCTRL_SHUTDOWN);
	if (IS_ERR_OR_NULL(q2spi->geni_gpio_shutdown)) {
		Q2SPI_DEBUG(q2spi, "No shutdown config specified!\n");
		ret = PTR_ERR(q2spi->geni_gpio_shutdown);
		goto get_icc_pinctrl_err;
	}
get_icc_pinctrl_err:
	return ret;
}
/**
 * q2spi_pinctrl_config - Does the pinctrl configuration
 * @pdev: pointer to Platform device
 * @q2spi: pointer to q2spi_geni driver data
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_pinctrl_config(struct platform_device *pdev, struct q2spi_geni *q2spi)
{
	int ret = 0;

	/* ICC and PINCTRL initialization */
	ret = q2spi_get_icc_pinctrl(pdev, q2spi);
	if (ret)
		Q2SPI_DEBUG(q2spi, "pinctrl get failed %d\n", ret);
	return ret;
}
/**
 * q2spi_chardev_create - Allocate two character devices dynamically
 * @q2spi: pointer to q2spi_geni driver data
 *
 * Allocates a range of char device numbers, adds the char
 * devices to the system, creates the device nodes and registers
 * them with sysfs.
 *
 * Return: 0 on success. Error code on failure.
 */
static int q2spi_chardev_create(struct q2spi_geni *q2spi)
{
	int ret = 0, i;

	ret = alloc_chrdev_region(&q2spi->chrdev.q2spi_dev, 0, MAX_DEV, "q2spidev");
	if (ret < 0) {
		Q2SPI_DEBUG(q2spi, "%s ret:%d\n", __func__, ret);
		return ret;
	}
	q2spi_cdev_major = MAJOR(q2spi->chrdev.q2spi_dev);
	q2spi->chrdev.q2spi_class = class_create(THIS_MODULE, "q2spidev");
	if (IS_ERR(q2spi->chrdev.q2spi_class)) {
		Q2SPI_DEBUG(q2spi, "%s ret:%lx\n", __func__, PTR_ERR(q2spi->chrdev.q2spi_class));
		ret = PTR_ERR(q2spi->chrdev.q2spi_class);
		goto err_class_create;
	}
	for (i = 0; i < MAX_DEV; i++) {
		cdev_init(&q2spi->chrdev.cdev[i], &q2spi_fops);
		q2spi->chrdev.cdev[i].owner = THIS_MODULE;
		q2spi->chrdev.major = q2spi_cdev_major;
		q2spi->chrdev.minor = i;
		ret = cdev_add(&q2spi->chrdev.cdev[i], MKDEV(q2spi_cdev_major, i), 1);
		if (ret) {
			Q2SPI_DEBUG(q2spi, "cdev_add failed ret:%d\n", ret);
			goto err_cdev_add;
		}
		if (i)
			q2spi->chrdev.class_dev = device_create(q2spi->chrdev.q2spi_class, NULL,
								MKDEV(q2spi_cdev_major, i),
								NULL, "q2spibt");
		else
			q2spi->chrdev.class_dev = device_create(q2spi->chrdev.q2spi_class, NULL,
								MKDEV(q2spi_cdev_major, i),
								NULL, "q2spiuwb");
		if (IS_ERR(q2spi->chrdev.class_dev)) {
			ret = PTR_ERR(q2spi->chrdev.class_dev);
			Q2SPI_DEBUG(q2spi, "failed to create device\n");
			goto err_dev_create;
		}
		Q2SPI_DEBUG(q2spi, "%s q2spi:%p i:%d end\n", __func__, q2spi, i);
	}
	return 0;
err_dev_create:
	for (i = 0; i < MAX_DEV; i++)
		cdev_del(&q2spi->chrdev.cdev[i]);
err_cdev_add:
	class_destroy(q2spi->chrdev.q2spi_class);
err_class_create:
	unregister_chrdev_region(MKDEV(q2spi_cdev_major, 0), MINORMASK);
	return ret;
}
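/*
 * With the two minors allocated above this creates /dev/q2spiuwb (minor 0)
 * and /dev/q2spibt (minor 1), both backed by q2spi_fops. An illustrative
 * userspace sequence, assuming a typical udev setup creates the nodes:
 *
 *	int fd = open("/dev/q2spibt", O_RDWR);
 *	write(fd, req_buf, req_len);	-> handled by q2spi_transfer()
 *	read(fd, rsp_buf, rsp_len);	-> handled by q2spi_response()
 */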
/**
 * q2spi_read_reg - read a host-accessible client register
 * @q2spi: Pointer to main q2spi_geni structure.
 * @reg_offset: specifies the register address of the client to be read.
 *
 * This function is used to read a register of the specified client.
 * It frames a local register access command, submits it to GSI and
 * waits for the GSI completion.
 *
 * Return: 0 for success, negative number for error condition.
 */
int q2spi_read_reg(struct q2spi_geni *q2spi, int reg_offset)
{
	struct q2spi_packet *q2spi_pkt = NULL;
	struct q2spi_dma_transfer *xfer;
	struct q2spi_request q2spi_req;
	int ret = 0;

	q2spi_req.cmd = LOCAL_REG_READ;
	q2spi_req.addr = reg_offset;
	q2spi_req.data_len = 4; /* In bytes */
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p &q2spi_pkt=%p\n", __func__, q2spi_pkt, &q2spi_pkt);
	ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA);
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p flow_id:%d\n", __func__, q2spi_pkt, ret);
	if (ret < 0) {
		Q2SPI_DEBUG(q2spi, "Err q2spi_frame_lra failed ret:%d\n", ret);
		return ret;
	}
	xfer = q2spi_pkt->xfer;
	xfer->tx_buf = q2spi_pkt->var1_pkt;
	xfer->tx_dma = q2spi_pkt->var1_tx_dma;
	xfer->rx_buf = q2spi_pkt->xfer->rx_buf;
	xfer->rx_dma = q2spi_pkt->xfer->rx_dma;
	xfer->cmd = q2spi_pkt->m_cmd_param;
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p cmd:%d\n", __func__, q2spi_pkt, xfer->cmd);
	xfer->tx_data_len = q2spi_req.data_len;
	xfer->tx_len = Q2SPI_HEADER_LEN;
	xfer->rx_data_len = q2spi_req.data_len;
	xfer->rx_len = xfer->rx_data_len;
	xfer->tid = q2spi_pkt->var1_pkt->flow_id;
	Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p tx_len:%d rx_len:%d\n",
		    __func__, xfer->tx_buf, (void *)xfer->tx_dma,
		    xfer->rx_buf, (void *)xfer->rx_dma,
		    xfer->tx_len, xfer->rx_len);
	q2spi_dump_ipc(q2spi, q2spi->ipc, "q2spi read reg tx_buf DMA TX",
		       (char *)xfer->tx_buf, xfer->tx_len);
	ret = q2spi_gsi_submit(q2spi_pkt);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_gsi_submit failed: %d\n", __func__, ret);
		return ret;
	}
	q2spi_free_xfer_tid(q2spi, q2spi_pkt->xfer->tid);
	Q2SPI_DEBUG(q2spi, "Reg:0x%x Read Val = 0x%x\n", reg_offset, *(unsigned int *)xfer->rx_buf);
	return ret;
}
/**
 * q2spi_write_reg - write a host-accessible client register
 * @q2spi: Pointer to main q2spi_geni structure.
 * @reg_offset: specifies the register address of the client to be written.
 * @data: specifies the value of the register to be written.
 *
 * This function is used to write to a register of the specified client.
 * It frames a local register access command, submits it to GSI and
 * waits for the GSI completion.
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_write_reg(struct q2spi_geni *q2spi, int reg_offset, unsigned long data)
{
	struct q2spi_packet *q2spi_pkt;
	struct q2spi_dma_transfer *xfer;
	struct q2spi_request q2spi_req;
	int ret = 0;

	q2spi_req.cmd = LOCAL_REG_WRITE;
	q2spi_req.addr = reg_offset;
	q2spi_req.data_len = 4;
	q2spi_req.data_buff = &data;
	ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p\n", __func__, q2spi_pkt);
	xfer = q2spi_pkt->xfer;
	xfer->tx_buf = q2spi_pkt->var1_pkt;
	xfer->tx_dma = q2spi_pkt->var1_tx_dma;
	xfer->cmd = q2spi_pkt->m_cmd_param;
	xfer->tx_data_len = q2spi_req.data_len;
	xfer->tx_len = Q2SPI_HEADER_LEN + xfer->tx_data_len;
	xfer->tid = q2spi_pkt->var1_pkt->flow_id;
	Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p tx_len:%d rx_len:%d\n",
		    __func__, xfer->tx_buf, (void *)xfer->tx_dma,
		    xfer->rx_buf, (void *)xfer->rx_dma,
		    xfer->tx_len, xfer->rx_len);
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var1_pkt_add:%p\n", __func__, q2spi_pkt->var1_pkt);
	q2spi_dump_ipc(q2spi, q2spi->ipc, "q2spi_write_reg tx_buf DMA TX",
		       (char *)xfer->tx_buf, xfer->tx_len);
	ret = q2spi_gsi_submit(q2spi_pkt);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s Err q2spi_gsi_submit failed: %d\n", __func__, ret);
		return ret;
	}
	q2spi_free_xfer_tid(q2spi, q2spi_pkt->xfer->tid);
	Q2SPI_DEBUG(q2spi, "%s write to reg success ret:%d\n", __func__, ret);
	return ret;
}
/**
 * q2spi_slave_init - Initialization sequence
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * This function performs the init sequence with the q2spi slave:
 * sends a host command to check whether the client is enabled,
 * reads the Q2SPI_HOST_CFG.DOORBELL_EN register info from the slave and
 * writes 1 to each bit of Q2SPI_ERROR_EN to enable error interrupts to
 * the host using the doorbell.
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_slave_init(struct q2spi_geni *q2spi)
{
	unsigned long scratch_data = 0xAAAAAAAA;
	unsigned long error_en_data = 0xFFFFFFFF;
	int ret = 0, value = 0;
	int retries = RETRIES;

	Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_SCRATCH0);
	/* Dummy SCRATCH register write */
	ret = q2spi_write_reg(q2spi, Q2SPI_SCRATCH0, scratch_data);
	if (ret) {
		Q2SPI_ERROR(q2spi, "scratch0 write failed: %d\n", ret);
		return ret;
	}
	/* Dummy SCRATCH register read */
	Q2SPI_DEBUG(q2spi, "%s reg: 0x%x\n", __func__, Q2SPI_SCRATCH0);
	ret = q2spi_read_reg(q2spi, Q2SPI_SCRATCH0);
	if (ret) {
		Q2SPI_ERROR(q2spi, "Err scratch0 read failed: %d\n", ret);
		return ret;
	}
	/*
	 * Send a dummy host command until the client is enabled.
	 * The dummy command can be reading the Q2SPI_HW_VERSION register.
	 */
	while (retries-- > 0) {
		value = q2spi_read_reg(q2spi, Q2SPI_HW_VERSION);
		Q2SPI_DEBUG(q2spi, "%s retries:%d value:%d\n", __func__, retries, value);
		if (!value)
			break;
		Q2SPI_DEBUG(q2spi, "HW_Version read failed: %d\n", value);
	}
	Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_HOST_CFG);
	ret = q2spi_read_reg(q2spi, Q2SPI_HOST_CFG);
	if (ret) {
		Q2SPI_ERROR(q2spi, "Err HOST CFG read failed: %d\n", ret);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_ERROR_EN);
	ret = q2spi_write_reg(q2spi, Q2SPI_ERROR_EN, error_en_data);
	if (ret) {
		Q2SPI_ERROR(q2spi, "Err Error_en reg write failed: %d\n", ret);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_ERROR_EN);
	ret = q2spi_read_reg(q2spi, Q2SPI_ERROR_EN);
	if (ret) {
		Q2SPI_ERROR(q2spi, "Err ERROR_EN read failed: %d\n", ret);
		return ret;
	}
	return 0;
}
/**
 * q2spi_clks_get - get SE and AHB clks
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * This function gets the clock resources for the SE, M-AHB and S-AHB clocks.
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_clks_get(struct q2spi_geni *q2spi)
{
	int ret = 0;

	q2spi->se_clk = devm_clk_get(q2spi->dev, "se-clk");
	if (IS_ERR(q2spi->se_clk)) {
		ret = PTR_ERR(q2spi->se_clk);
		Q2SPI_ERROR(q2spi, "Err getting SE clk %d\n", ret);
		return ret;
	}
	q2spi->se.clk = q2spi->se_clk;
	q2spi->m_ahb_clk = devm_clk_get(q2spi->dev->parent, "m-ahb");
	if (IS_ERR(q2spi->m_ahb_clk)) {
		ret = PTR_ERR(q2spi->m_ahb_clk);
		Q2SPI_ERROR(q2spi, "Err getting Main AHB clk %d\n", ret);
		return ret;
	}
	q2spi->s_ahb_clk = devm_clk_get(q2spi->dev->parent, "s-ahb");
	if (IS_ERR(q2spi->s_ahb_clk)) {
		ret = PTR_ERR(q2spi->s_ahb_clk);
		Q2SPI_ERROR(q2spi, "Err getting Secondary AHB clk %d\n", ret);
		return ret;
	}
	return 0;
}
/*
 * q2spi_copy_cr_data_to_pkt() - copies cr data to q2spi_pkt
 *
 * @q2spi_pkt: pointer to q2spi_packet
 * @cr_pkt: pointer to cr_pkt
 * @idx: index of cr data in cr_pkt
 *
 * @Return: None
 */
void
q2spi_copy_cr_data_to_pkt(struct q2spi_packet *q2spi_pkt, struct q2spi_cr_packet *cr_pkt, int idx)
{
	memcpy(&q2spi_pkt->cr_hdr, &cr_pkt->cr_hdr[idx], sizeof(struct q2spi_cr_header));
	memcpy(&q2spi_pkt->cr_var3, &cr_pkt->var3_pkt[idx], sizeof(struct q2spi_client_dma_pkt));
	memcpy(&q2spi_pkt->cr_bulk, &cr_pkt->bulk_pkt[idx],
	       sizeof(struct q2spi_client_bulk_access_pkt));
	q2spi_pkt->cr_hdr_type = cr_pkt->cr_hdr_type[idx];
	Q2SPI_DEBUG(q2spi_pkt->q2spi, "%s q2spi_pkt:%p cr_hdr_type:%d\n",
		    __func__, q2spi_pkt, q2spi_pkt->cr_hdr_type);
}
/*
 * q2spi_send_system_mem_access() - Sends a system memory access read command
 *
 * @q2spi: pointer to q2spi_geni driver data
 * @q2spi_pkt: double pointer to q2spi_packet
 * @cr_pkt: pointer to cr_pkt
 * @idx: index of cr data in cr_pkt
 *
 * @Return: 0 on success, negative number on failure
 */
int q2spi_send_system_mem_access(struct q2spi_geni *q2spi, struct q2spi_packet **q2spi_pkt,
				 struct q2spi_cr_packet *cr_pkt, int idx)
{
	unsigned long xfer_timeout = 0;
	long timeout = 0;
	struct q2spi_request q2spi_req;
	int ret = 0, retries = Q2SPI_RESP_BUF_RETRIES;
	unsigned int dw_len;
	u8 flow_id = cr_pkt->var3_pkt[idx].flow_id;

	/*
	 * Reassemble the DWORD length from its three header fields. Masks are
	 * applied before the shifts (the previous post-shift masks, e.g.
	 * "(dw_len_part3 << 12) & 0xFF", always evaluated to zero); the exact
	 * field widths are an assumption derived from the shift distances.
	 */
	dw_len = ((cr_pkt->var3_pkt[idx].dw_len_part3 & 0xFF) << 12) |
		 ((cr_pkt->var3_pkt[idx].dw_len_part2 & 0xFF) << 4) |
		 cr_pkt->var3_pkt[idx].dw_len_part1;
	q2spi_req.data_len = (dw_len * 4) + 4;
	Q2SPI_DEBUG(q2spi, "%s dw_len:%d data_len:%d\n", __func__, dw_len, q2spi_req.data_len);
	q2spi_req.cmd = DATA_READ;
	q2spi_req.addr = 0;
	q2spi_req.end_point = 0;
	q2spi_req.proto_ind = 0;
	q2spi_req.priority = 0;
	q2spi_req.flow_id = flow_id;
	q2spi_req.sync = 0;
	while (retries--) {
		mutex_lock(&q2spi->queue_lock);
		ret = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, q2spi_pkt);
		mutex_unlock(&q2spi->queue_lock);
		if (ret == -ENOMEM) {
			Q2SPI_ERROR(q2spi, "%s Err ret:%d\n", __func__, ret);
			/* sleep for some time to let the application consume the pending rx buffers */
			usleep_range(125000, 150000);
		} else {
			break;
		}
	}
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err ret:%d\n", __func__, ret);
		return ret;
	}
	q2spi_copy_cr_data_to_pkt(*q2spi_pkt, cr_pkt, idx);
	(*q2spi_pkt)->var3_data_len = q2spi_req.data_len;
	if (atomic_read(&q2spi->sma_wr_pending)) {
		Q2SPI_DEBUG(q2spi, "%s sma write is pending, wait\n", __func__);
		xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET);
		timeout = wait_for_completion_interruptible_timeout(&q2spi->sma_wr_comp,
								    xfer_timeout);
		if (timeout <= 0) {
			Q2SPI_ERROR(q2spi, "%s Err timeout %ld for sma write complete\n",
				    __func__, timeout);
			atomic_set(&q2spi->doorbell_pending, 0);
			if (timeout == -ERESTARTSYS) {
				q2spi_sys_restart = true;
				return -ERESTARTSYS;
			}
			return -ETIMEDOUT;
		}
	}
	ret = __q2spi_send_messages(q2spi, (void *)*q2spi_pkt);
	Q2SPI_DEBUG(q2spi, "%s End ret:%d %d\n", __func__, ret, __LINE__);
	return ret;
}
/*
 * q2spi_find_pkt_by_flow_id() - finds the q2spi packet in tx_queue_list and copies the cr data
 *
 * @q2spi: pointer to q2spi_geni driver data
 * @cr_pkt: pointer to cr_pkt
 * @idx: index of the var3_pkt which contains the flow_id received from the target
 *
 * @Return: None
 */
void q2spi_find_pkt_by_flow_id(struct q2spi_geni *q2spi, struct q2spi_cr_packet *cr_pkt, int idx)
{
	struct q2spi_packet *q2spi_pkt = NULL, *q2spi_pkt_tmp1, *q2spi_pkt_tmp2;
	u8 flow_id = cr_pkt->var3_pkt[idx].flow_id;

	mutex_lock(&q2spi->queue_lock);
	list_for_each_entry_safe(q2spi_pkt_tmp1, q2spi_pkt_tmp2, &q2spi->tx_queue_list, list) {
		if (q2spi_pkt_tmp1->flow_id == flow_id) {
			q2spi_pkt = q2spi_pkt_tmp1;
			q2spi_copy_cr_data_to_pkt(q2spi_pkt, cr_pkt, idx);
			break;
		}
	}
	mutex_unlock(&q2spi->queue_lock);
	if (q2spi_pkt) {
		Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n",
			    __func__, q2spi_pkt, flow_id);
		/* wake up the HRF flow which is waiting for this CR doorbell */
		complete_all(&q2spi_pkt->wait_for_db);
		return;
	}
	Q2SPI_DEBUG(q2spi, "%s Err q2spi_pkt not found for flow_id %d\n", __func__, flow_id);
}
/*
 * q2spi_set_data_avail_in_pkt() - sets the q2spi packet state to data available
 *
 * @q2spi: pointer to q2spi_geni driver data
 * @cr_pkt: pointer to cr_pkt containing bulk_pkt
 * @idx: index of the bulk_pkt which contains the flow_id received from the target
 *
 * @Return: None
 */
void q2spi_set_data_avail_in_pkt(struct q2spi_geni *q2spi, struct q2spi_cr_packet *cr_pkt, int idx)
{
	struct q2spi_packet *q2spi_pkt = NULL, *q2spi_pkt_tmp1, *q2spi_pkt_tmp2;
	u8 flow_id = cr_pkt->bulk_pkt[idx].flow_id;

	mutex_lock(&q2spi->queue_lock);
	list_for_each_entry_safe(q2spi_pkt_tmp1, q2spi_pkt_tmp2, &q2spi->tx_queue_list, list) {
		if (q2spi_pkt_tmp1->flow_id == flow_id) {
			if (q2spi_pkt_tmp1->cr_var3.flow_id == flow_id &&
			    q2spi_pkt_tmp1->state == IN_USE) {
				q2spi_pkt = q2spi_pkt_tmp1;
				Q2SPI_DEBUG(q2spi, "%s Found CR PKT for flow_id:%d\n",
					    __func__, flow_id);
				break;
			}
		}
	}
	mutex_unlock(&q2spi->queue_lock);
	if (q2spi_pkt) {
		Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n",
			    __func__, q2spi_pkt, flow_id);
		q2spi_pkt->state = DATA_AVAIL;
	} else {
		Q2SPI_DEBUG(q2spi, "%s Err q2spi_pkt not found for flow_id %d\n",
			    __func__, flow_id);
	}
}
/*
 * q2spi_complete_bulk_status() - calls completion for the q2spi packet waiting on bulk_wait
 *
 * @q2spi: pointer to q2spi_geni driver data
 * @cr_pkt: pointer to cr_pkt containing bulk_pkt
 * @idx: index of the bulk_pkt which contains the flow_id received from the target
 *
 * @Return: None
 */
void q2spi_complete_bulk_status(struct q2spi_geni *q2spi, struct q2spi_cr_packet *cr_pkt, int idx)
{
	struct q2spi_packet *q2spi_pkt = NULL, *q2spi_pkt_tmp1, *q2spi_pkt_tmp2;
	u8 flow_id = cr_pkt->bulk_pkt[idx].flow_id;

	mutex_lock(&q2spi->queue_lock);
	list_for_each_entry_safe(q2spi_pkt_tmp1, q2spi_pkt_tmp2, &q2spi->tx_queue_list, list) {
		if (q2spi_pkt_tmp1->flow_id == flow_id) {
			q2spi_pkt = q2spi_pkt_tmp1;
			break;
		}
	}
	mutex_unlock(&q2spi->queue_lock);
	if (q2spi_pkt) {
		Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n",
			    __func__, q2spi_pkt, flow_id);
		q2spi_copy_cr_data_to_pkt(q2spi_pkt, cr_pkt, idx);
		complete_all(&q2spi_pkt->bulk_wait);
	} else {
		Q2SPI_DEBUG(q2spi, "%s Err q2spi_pkt not found for flow_id %d\n",
			    __func__, flow_id);
	}
}
/*
 * q2spi_handle_wakeup_work() - worker function which handles the remote wakeup flow for q2spi
 * @work: pointer to work_struct
 *
 * Return: None
 */
static void q2spi_handle_wakeup_work(struct work_struct *work)
{
	struct q2spi_geni *q2spi =
		container_of(work, struct q2spi_geni, q2spi_wakeup_work);
	int ret = 0;

	Q2SPI_DEBUG(q2spi, "%s Enter PID=%d q2spi:%p\n", __func__, current->pid, q2spi);
	ret = q2spi_geni_runtime_resume(q2spi->dev);
	if (ret)
		Q2SPI_ERROR(q2spi, "%s Runtime resume Failed:%d\n", __func__, ret);
}
/*
 * q2spi_handle_doorbell_work() - worker function which handles the doorbell flow for q2spi
 *
 * @work: pointer to work_struct
 *
 * Return: None
 */
static void q2spi_handle_doorbell_work(struct work_struct *work)
{
	struct q2spi_geni *q2spi =
		container_of(work, struct q2spi_geni, q2spi_doorbell_work);
	struct q2spi_cr_packet *q2spi_cr_pkt = NULL;
	struct q2spi_packet *q2spi_pkt;
	int ret = 0, i = 0, no_of_crs = 0;
	bool sys_mem_access = false;
	long timeout = 0;

	Q2SPI_DEBUG(q2spi, "%s Enter PID=%d q2spi:%p PM get_sync count:%d\n", __func__,
		    current->pid, q2spi, atomic_read(&q2spi->dev->power.usage_count));
	ret = pm_runtime_get_sync(q2spi->dev);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err for PM get\n", __func__);
		pm_runtime_put_noidle(q2spi->dev);
		pm_runtime_set_suspended(q2spi->dev);
		return;
	}
	Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__,
		    atomic_read(&q2spi->dev->power.usage_count));
	/* wait for RX dma channel TCE 0x22 to get the CR body in the RX DMA buffer */
	ret = check_gsi_transfer_completion_db_rx(q2spi);
	if (ret) {
		Q2SPI_DEBUG(q2spi, "%s db rx completion timeout: %d\n", __func__, ret);
		goto exit_doorbell_work;
	}
	/* Extract the CR header info from the doorbell rx dma buffer */
	q2spi_cr_pkt = q2spi_prepare_cr_pkt(q2spi);
	if (!q2spi_cr_pkt) {
		Q2SPI_DEBUG(q2spi, "Err q2spi_prepare_cr_pkt failed\n");
		goto exit_doorbell_work;
	}
	q2spi_unmap_doorbell_rx_buf(q2spi);
	reinit_completion(&q2spi->sma_wait);
	no_of_crs = q2spi_cr_pkt->num_valid_crs;
	Q2SPI_DEBUG(q2spi, "%s q2spi_cr_pkt:%p q2spi_db_xfer:%p db_xfer_rx_buf:%p\n",
		    __func__, q2spi_cr_pkt, q2spi->db_xfer, q2spi->db_xfer->rx_buf);
	for (i = 0; i < no_of_crs; i++) {
		Q2SPI_DEBUG(q2spi, "%s i=%d CR Header CMD 0x%x\n",
			    __func__, i, q2spi_cr_pkt->cr_hdr[i].cmd);
		if (q2spi_cr_pkt->cr_hdr[i].cmd == ADDR_LESS_WR_ACCESS ||
		    q2spi_cr_pkt->cr_hdr[i].cmd == ADDR_LESS_RD_ACCESS) {
			if (q2spi_cr_pkt->cr_hdr[i].flow) {
				/* C->M flow */
				Q2SPI_DEBUG(q2spi,
					    "%s cr_hdr ADDR_LESS_WR/RD_ACCESS with client flow opcode:%d\n",
					    __func__, q2spi_cr_pkt->cr_hdr[i].cmd);
				Q2SPI_DEBUG(q2spi, "%s len_part1:%d len_part2:%d len_part3:%d\n",
					    __func__, q2spi_cr_pkt->var3_pkt[i].dw_len_part1,
					    q2spi_cr_pkt->var3_pkt[i].dw_len_part2,
					    q2spi_cr_pkt->var3_pkt[i].dw_len_part3);
				if (!q2spi_send_system_mem_access(q2spi, &q2spi_pkt,
								  q2spi_cr_pkt, i))
					sys_mem_access = true;
			} else {
				/* M->C flow */
				Q2SPI_DEBUG(q2spi,
					    "%s cr_hdr ADDR_LESS_WR/RD with Host flow, opcode:%d\n",
					    __func__, q2spi_cr_pkt->cr_hdr[i].cmd);
				if (q2spi_cr_pkt->cr_hdr[i].cmd == ADDR_LESS_WR_ACCESS) {
					q2spi_find_pkt_by_flow_id(q2spi, q2spi_cr_pkt, i);
					Q2SPI_DEBUG(q2spi, "%s cmd:%d doorbell CR for Host flow\n",
						    __func__, q2spi_cr_pkt->cr_hdr[i].cmd);
				}
			}
		} else if (q2spi_cr_pkt->cr_hdr[i].cmd == BULK_ACCESS_STATUS) {
			if (q2spi_cr_pkt->bulk_pkt[i].flow_id >= 0x8) {
				Q2SPI_DEBUG(q2spi, "%s Bulk status with Client Flow ID\n",
					    __func__);
				q2spi_set_data_avail_in_pkt(q2spi, q2spi_cr_pkt, i);
				q2spi_notify_data_avail_for_client(q2spi);
			} else {
				Q2SPI_DEBUG(q2spi, "%s Bulk status with host Flow ID:%d\n",
					    __func__, q2spi_cr_pkt->bulk_pkt[i].flow_id);
				q2spi_complete_bulk_status(q2spi, q2spi_cr_pkt, i);
			}
		} else if (q2spi_cr_pkt->cr_hdr[i].cmd == CR_EXTENSION) {
			Q2SPI_DEBUG(q2spi, "%s Extended CR from Client\n", __func__);
		}
		if (sys_mem_access) {
			Q2SPI_DEBUG(q2spi, "%s waiting on sma_wait\n", __func__);
			/* Block on read_wq until the sma completes */
			timeout = wait_for_completion_interruptible_timeout
					(&q2spi->sma_wait, msecs_to_jiffies(XFER_TIMEOUT_OFFSET));
			if (timeout <= 0) {
				Q2SPI_DEBUG(q2spi, "%s Err wait interrupted timeout:%ld\n",
					    __func__, timeout);
				if (timeout == -ERESTARTSYS) {
					q2spi_sys_restart = true;
					return;
				}
				goto exit_doorbell_work;
			}
		}
	}
	q2spi_kfree(q2spi, q2spi_cr_pkt, __LINE__);
	/*
	 * Get one rx buffer from the allocated pool and
	 * map it to GSI, ready for the next doorbell.
	 */
	if (q2spi_map_doorbell_rx_buf(q2spi))
		Q2SPI_DEBUG(q2spi, "Err failed to alloc RX DMA buf\n");
exit_doorbell_work:
	pm_runtime_mark_last_busy(q2spi->dev);
	Q2SPI_DEBUG(q2spi, "%s PM before put_autosuspend count:%d\n",
		    __func__, atomic_read(&q2spi->dev->power.usage_count));
	pm_runtime_put_autosuspend(q2spi->dev);
	Q2SPI_DEBUG(q2spi, "%s End PID=%d PM after put_autosuspend count:%d\n",
		    __func__, current->pid, atomic_read(&q2spi->dev->power.usage_count));
}

/**
 * q2spi_chardev_destroy - Destroys character devices which are created as part of probe
 *
 * @q2spi: pointer to q2spi_geni driver data
 *
 * Return: None
 */
static void q2spi_chardev_destroy(struct q2spi_geni *q2spi)
{
	int i;

	for (i = 0; i < MAX_DEV; i++) {
		device_destroy(q2spi->chrdev.q2spi_class, MKDEV(q2spi_cdev_major, i));
		cdev_del(&q2spi->chrdev.cdev[i]);
	}
	class_destroy(q2spi->chrdev.q2spi_class);
	unregister_chrdev_region(MKDEV(q2spi_cdev_major, 0), MINORMASK);
	Q2SPI_DEBUG(q2spi, "%s End %d\n", __func__, q2spi_cdev_major);
}

/**
 * q2spi_sleep_config - Q2SPI sleep configuration
 *
 * @q2spi: pointer to q2spi_geni driver data
 * @pdev: pointer to platform device
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_sleep_config(struct q2spi_geni *q2spi, struct platform_device *pdev)
{
	int ret = 0;

	q2spi->wake_clk_gpio = of_get_named_gpio(pdev->dev.of_node, "clk-pin", 0);
	if (!gpio_is_valid(q2spi->wake_clk_gpio)) {
		dev_err(&pdev->dev, "failed to parse clk gpio\n");
		return -EINVAL;
	}
	ret = devm_gpio_request(q2spi->dev, q2spi->wake_clk_gpio, "Q2SPI_CLK_GPIO");
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s:Err failed to request GPIO-%d\n", __func__,
			    q2spi->wake_clk_gpio);
		return ret;
	}
	q2spi->wake_mosi_gpio = of_get_named_gpio(pdev->dev.of_node, "mosi-pin", 0);
	if (!gpio_is_valid(q2spi->wake_mosi_gpio)) {
		dev_err(&pdev->dev, "failed to parse mosi gpio\n");
		return -EINVAL;
	}
	ret = devm_gpio_request(q2spi->dev, q2spi->wake_mosi_gpio, "Q2SPI_MOSI_GPIO");
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s:Err failed to request GPIO-%d\n", __func__,
			    q2spi->wake_mosi_gpio);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s Q2SPI clk_gpio:%d mosi_gpio:%d\n",
		    __func__, q2spi->wake_clk_gpio, q2spi->wake_mosi_gpio);
	q2spi->wakeup_wq = alloc_workqueue("%s", WQ_HIGHPRI, 1, dev_name(q2spi->dev));
	if (!q2spi->wakeup_wq) {
		Q2SPI_ERROR(q2spi, "Err failed to allocate wakeup workqueue");
		return -ENOMEM;
	}
	INIT_WORK(&q2spi->q2spi_wakeup_work, q2spi_handle_wakeup_work);
	/* Use the doorbell pin as a wakeup irq */
	q2spi->doorbell_irq = platform_get_irq(pdev, 1);
	Q2SPI_DEBUG(q2spi, "%s Q2SPI doorbell_irq:%d\n", __func__, q2spi->doorbell_irq);
	if (q2spi->doorbell_irq < 0)
		return q2spi->doorbell_irq;
	irq_set_status_flags(q2spi->doorbell_irq, IRQ_NOAUTOEN);
	ret = devm_request_irq(q2spi->dev, q2spi->doorbell_irq,
			       q2spi_geni_wakeup_isr, IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			       "doorbell_wakeup", q2spi);
	if (unlikely(ret)) {
		Q2SPI_ERROR(q2spi, "%s:Err failed to get wake IRQ, ret:%d\n", __func__, ret);
		return ret;
	}
	q2spi_geni_resources_off(q2spi);
	pm_runtime_use_autosuspend(q2spi->dev);
	pm_runtime_set_autosuspend_delay(q2spi->dev, Q2SPI_AUTOSUSPEND_DELAY);
	pm_runtime_set_suspended(q2spi->dev);
	pm_runtime_enable(q2spi->dev);
	return ret;
}

/**
 * q2spi_geni_probe - Q2SPI interface driver probe function
 * @pdev: Q2SPI Serial Engine to probe.
 *
 * Allocates basic resources for the QUPv3 SE which supports q2spi
 * and then registers a range of char device numbers. Also invokes
 * methods for QUPv3 SE and Q2SPI protocol specific initialization.
 *
 * Return: 0 for success, negative number for error condition.
 */
static int q2spi_geni_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct q2spi_geni *q2spi;
	int ret = 0;

	pr_info("boot_kpi: M - DRIVER GENI_Q2SPI Init\n");
	q2spi = devm_kzalloc(dev, sizeof(*q2spi), GFP_KERNEL);
	if (!q2spi) {
		ret = -ENOMEM;
		goto q2spi_err;
	}
	q2spi->dev = dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Err getting IO region\n");
		ret = -EINVAL;
		goto q2spi_err;
	}
	q2spi->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(q2spi->base)) {
		ret = PTR_ERR(q2spi->base);
		dev_err(dev, "Err ioremap fail %d\n", ret);
		goto q2spi_err;
	}
	q2spi->irq = platform_get_irq(pdev, 0);
	if (q2spi->irq < 0) {
		ret = q2spi->irq;
		dev_err(dev, "Err irq get failed %d\n", ret);
		goto q2spi_err;
	}
	irq_set_status_flags(q2spi->irq, IRQ_NOAUTOEN);
	ret = devm_request_irq(dev, q2spi->irq, q2spi_geni_irq,
			       IRQF_TRIGGER_HIGH, dev_name(dev), q2spi);
	if (ret) {
		dev_err(dev, "Err Failed to request irq %d\n", ret);
		goto q2spi_err;
	}
	q2spi->se.dev = dev;
	q2spi->se.wrapper = dev_get_drvdata(dev->parent);
	if (!q2spi->se.wrapper) {
		dev_err(dev, "Err SE Wrapper is NULL, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto q2spi_err;
	}
	q2spi->ipc = ipc_log_context_create(15, dev_name(dev), 0);
	if (!q2spi->ipc && IS_ENABLED(CONFIG_IPC_LOGGING))
		dev_err(dev, "Error creating IPC logs\n");
	q2spi->se.base = q2spi->base;
	if (q2spi_max_speed) {
		q2spi->max_speed_hz = q2spi_max_speed;
	} else {
		if (of_property_read_u32(pdev->dev.of_node, "q2spi-max-frequency",
					 &q2spi->max_speed_hz)) {
			Q2SPI_ERROR(q2spi, "Err Max frequency not specified\n");
			ret = -EINVAL;
			goto q2spi_err;
		}
	}
	q2spi->wrapper_dev = dev->parent;
	Q2SPI_DEBUG(q2spi, "%s q2spi:0x%p q2spi_cdev:0x%p dev:0x%p, p_dev:0x%p",
		    __func__, q2spi, &q2spi->chrdev, dev, &pdev->dev);
	Q2SPI_INFO(q2spi, "%s dev:%s q2spi_max_freq:%uhz\n",
		   __func__, dev_name(q2spi->dev), q2spi->max_speed_hz);
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		Q2SPI_INFO(q2spi, "%s dma_set_mask_and_coherent with DMA_BIT_MASK(48) failed",
			   __func__);
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			Q2SPI_ERROR(q2spi, "Err could not set DMA mask\n");
			goto q2spi_err;
		}
	}
	ret = q2spi_chardev_create(q2spi);
	if (ret)
		goto q2spi_err;
	ret = q2spi_clks_get(q2spi);
	if (ret) {
		Q2SPI_ERROR(q2spi, "Err clks get failed\n");
		goto chardev_destroy;
	}
	ret = q2spi_pinctrl_config(pdev, q2spi);
	if (ret)
		goto chardev_destroy;
	mutex_init(&q2spi->geni_resource_lock);
	ret = q2spi_geni_resources_on(q2spi);
	if (ret)
		goto chardev_destroy;
	ret = q2spi_geni_init(q2spi);
	if (ret) {
		Q2SPI_DEBUG(q2spi, "Geni init failed %d\n", ret);
		goto resources_off;
	}
	init_waitqueue_head(&q2spi->readq);
	init_waitqueue_head(&q2spi->read_wq);
	INIT_LIST_HEAD(&q2spi->tx_queue_list);
	mutex_init(&q2spi->gsi_lock);
	spin_lock_init(&q2spi->txn_lock);
	mutex_init(&q2spi->queue_lock);
	mutex_init(&q2spi->send_msgs_lock);
	spin_lock_init(&q2spi->cr_queue_lock);
	q2spi->port_release = true;
	q2spi->kworker = kthread_create_worker(0, "kthread_q2spi");
	if (IS_ERR(q2spi->kworker)) {
		Q2SPI_ERROR(q2spi, "Err failed to create message pump kworker\n");
		ret = PTR_ERR(q2spi->kworker);
		q2spi->kworker = NULL;
		goto geni_deinit;
	}
	kthread_init_work(&q2spi->send_messages, q2spi_send_messages);
	init_completion(&q2spi->tx_cb);
	init_completion(&q2spi->rx_cb);
	init_completion(&q2spi->db_rx_cb);
	init_completion(&q2spi->db_setup_wait);
	init_completion(&q2spi->sma_wait);
	init_completion(&q2spi->wait_for_ext_cr);
	atomic_set(&q2spi->sma_wr_pending, 0);
	atomic_set(&q2spi->sma_rd_pending, 0);
	init_completion(&q2spi->sma_wr_comp);
	init_completion(&q2spi->sma_rd_comp);
	/* Pre-allocate buffers for transfers */
	ret = q2spi_pre_alloc_buffers(q2spi);
	if (ret) {
		Q2SPI_ERROR(q2spi, "Err failed to alloc buffers");
		goto destroy_worker;
	}
	q2spi->db_q2spi_pkt = devm_kzalloc(q2spi->dev, sizeof(struct q2spi_packet), GFP_KERNEL);
	if (!q2spi->db_q2spi_pkt) {
		ret = -ENOMEM;
		Q2SPI_ERROR(q2spi, "%s Err failed to allocate db_q2spi_pkt\n", __func__);
		goto free_buf;
	}
	q2spi->db_q2spi_pkt->q2spi = q2spi;
	q2spi->db_xfer = devm_kzalloc(q2spi->dev, sizeof(struct q2spi_dma_transfer), GFP_KERNEL);
	if (!q2spi->db_xfer) {
		ret = -ENOMEM;
		Q2SPI_ERROR(q2spi, "Err failed to alloc db_xfer buffer");
		goto free_buf;
	}
	q2spi->doorbell_wq = alloc_workqueue("%s", WQ_HIGHPRI, 1, dev_name(dev));
	if (!q2spi->doorbell_wq) {
		ret = -ENOMEM;
		Q2SPI_ERROR(q2spi, "Err failed to allocate workqueue");
		goto free_buf;
	}
	INIT_WORK(&q2spi->q2spi_doorbell_work, q2spi_handle_doorbell_work);
	dev_dbg(dev, "Q2SPI GENI SE Driver probed\n");
	platform_set_drvdata(pdev, q2spi);
	if (device_create_file(dev, &dev_attr_max_dump_size))
		Q2SPI_INFO(q2spi, "Unable to create device file for max_dump_size\n");
	q2spi->max_data_dump_size = Q2SPI_DATA_DUMP_SIZE;
	ret = q2spi_sleep_config(q2spi, pdev);
	if (ret)
		goto free_buf;
	ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_default);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s: Err failed to set pinctrl default state, ret:%d\n",
			    __func__, ret);
		goto free_buf;
	}
	Q2SPI_INFO(q2spi, "%s Q2SPI GENI SE Driver probe\n", __func__);
	pr_info("boot_kpi: M - DRIVER GENI_Q2SPI Ready\n");
	return 0;
free_buf:
	q2spi_free_dma_buf(q2spi);
destroy_worker:
	idr_destroy(&q2spi->tid_idr);
	if (q2spi->kworker) {
		kthread_destroy_worker(q2spi->kworker);
		q2spi->kworker = NULL;
	}
geni_deinit:
	q2spi_geni_gsi_release(q2spi);
resources_off:
	q2spi_geni_resources_off(q2spi);
chardev_destroy:
	q2spi_chardev_destroy(q2spi);
q2spi_err:
	/* q2spi may be NULL here if the initial devm_kzalloc() failed */
	if (q2spi) {
		Q2SPI_ERROR(q2spi, "%s: failed, ret:%d\n", __func__, ret);
		q2spi->base = NULL;
	}
	return ret;
}
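
/**
 * q2spi_geni_remove - Q2SPI interface driver remove function
 *
 * @pdev: platform device handle
 *
 * Releases what probe set up: the sysfs file, wakeup and doorbell
 * workqueues, pre-allocated DMA buffers, the TID idr, the kthread
 * worker, GSI channels, GENI resources, character devices and the
 * IPC log context.
 *
 * Return: 0 always.
 */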
static int q2spi_geni_remove(struct platform_device *pdev)
{
	struct q2spi_geni *q2spi = platform_get_drvdata(pdev);

	pr_info("%s q2spi=0x%p\n", __func__, q2spi);
	if (!q2spi || !q2spi->base)
		return 0;
	device_remove_file(&pdev->dev, &dev_attr_max_dump_size);
	destroy_workqueue(q2spi->wakeup_wq);
	destroy_workqueue(q2spi->doorbell_wq);
	q2spi_free_dma_buf(q2spi);
	idr_destroy(&q2spi->tid_idr);
	if (q2spi->kworker) {
		kthread_destroy_worker(q2spi->kworker);
		q2spi->kworker = NULL;
	}
	q2spi_geni_gsi_release(q2spi);
	q2spi_geni_resources_off(q2spi);
	q2spi_chardev_destroy(q2spi);
	if (q2spi->ipc)
		ipc_log_context_destroy(q2spi->ipc);
	return 0;
}

/**
 * get_q2spi - get q2spi pointer from device
 * @dev: Device pointer
 *
 * Return: q2spi pointer
 */
static struct q2spi_geni *get_q2spi(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct q2spi_geni *q2spi = platform_get_drvdata(pdev);

	return q2spi;
}

/**
 * q2spi_wakeup_hw_through_gpio - Prepare HW wakeup through MOSI and clock GPIOs
 *
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Return: 0 for success, else returns linux error codes
 */
int q2spi_wakeup_hw_through_gpio(struct q2spi_geni *q2spi)
{
	int ret = 0;

	q2spi_unmap_doorbell_rx_buf(q2spi);
	Q2SPI_DEBUG(q2spi, "%s Sending disconnect doorbell only\n", __func__);
	geni_gsi_disconnect_doorbell_stop_ch(q2spi->gsi->tx_c, false);
	ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_default);
	if (ret) {
		Q2SPI_ERROR(q2spi, "%s: Err failed to set pinctrl default state, ret:%d\n",
			    __func__, ret);
		return ret;
	}
	gpio_direction_output(q2spi->wake_clk_gpio, 0);
	/* Set clock pin to low */
	gpio_set_value(q2spi->wake_clk_gpio, 0);
	Q2SPI_DEBUG(q2spi, "%s:gpio(%d) value is %d\n", __func__,
		    q2spi->wake_clk_gpio, gpio_get_value(q2spi->wake_clk_gpio));
	gpio_direction_output(q2spi->wake_mosi_gpio, 0);
	/* Set MOSI pin to high */
	gpio_set_value(q2spi->wake_mosi_gpio, 1);
	Q2SPI_DEBUG(q2spi, "%s:gpio(%d) value is %d\n", __func__,
		    q2spi->wake_mosi_gpio, gpio_get_value(q2spi->wake_mosi_gpio));
	usleep_range(2000, 5000);
	/* Set MOSI pin back to low */
	gpio_set_value(q2spi->wake_mosi_gpio, 0);
	Q2SPI_DEBUG(q2spi, "%s:gpio(%d) value is %d\n", __func__,
		    q2spi->wake_mosi_gpio, gpio_get_value(q2spi->wake_mosi_gpio));
	gpio_direction_input(q2spi->wake_mosi_gpio);
	gpio_direction_input(q2spi->wake_clk_gpio);
	/* Bring back to QUP mode by switching to the pinctrl active state */
	ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_active);
	if (ret) {
		Q2SPI_DEBUG(q2spi, "%s: Err failed to set pinctrl active state, ret:%d\n",
			    __func__, ret);
		return ret;
	}
	ret = q2spi_map_doorbell_rx_buf(q2spi);
	return ret;
}

/**
 * q2spi_put_slave_to_sleep - Put HW to sleep by sending HRF sleep command
 *
 * @q2spi: Pointer to main q2spi_geni structure
 *
 * Return: 0 for success, negative number on failure.
 */
static int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi)
{
	struct q2spi_packet *q2spi_pkt;
	struct q2spi_request q2spi_req;
	int ret = 0;

	Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid);
	q2spi_req.cmd = Q2SPI_HRF_SLEEP_CMD;
	q2spi_req.sync = 1;
	mutex_lock(&q2spi->queue_lock);
	ret = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &q2spi_pkt);
	mutex_unlock(&q2spi->queue_lock);
	if (ret < 0) {
		Q2SPI_ERROR(q2spi, "%s Err failed ret:%d\n", __func__, ret);
		return ret;
	}
	Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p tid:%d\n", __func__, q2spi_pkt, q2spi_pkt->xfer->tid);
	q2spi_pkt->is_client_sleep_pkt = true;
	__q2spi_transfer(q2spi, q2spi_req, q2spi_pkt, 0);
	q2spi_pkt->state = IN_DELETION;
	q2spi_free_xfer_tid(q2spi, q2spi_pkt->xfer->tid);
	q2spi_del_pkt_from_tx_queue(q2spi, q2spi_pkt);
	q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__);
	Q2SPI_DEBUG(q2spi, "%s: PID=%d End\n", __func__, current->pid);
	return ret;
}
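
/**
 * q2spi_geni_shutdown - platform shutdown callback
 *
 * @pdev: platform device handle
 *
 * Flags a system restart so pending waiters bail out and marks the
 * port as released; no hardware teardown is performed here.
 *
 * Return: None
 */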
static void q2spi_geni_shutdown(struct platform_device *pdev)
{
	struct q2spi_geni *q2spi = platform_get_drvdata(pdev);

	pr_info("%s q2spi=0x%p\n", __func__, q2spi);
	if (!q2spi || !q2spi->base)
		return;
	q2spi_sys_restart = true;
	q2spi->port_release = true;
}
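
/**
 * q2spi_geni_runtime_suspend - runtime PM suspend callback
 *
 * @dev: pointer to q2spi device
 *
 * Puts the client to sleep with an HRF sleep command, disconnects the
 * doorbell over the GSI TX channel, arms the doorbell IRQ as a wake
 * source and turns the GENI resources off.
 *
 * Return: 0 on success, negative error code otherwise.
 */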
static int q2spi_geni_runtime_suspend(struct device *dev)
{
	struct q2spi_geni *q2spi = NULL;
	int ret = 0;

	if (q2spi_sys_restart)
		return -ERESTARTSYS;
	q2spi = get_q2spi(dev);
	if (!q2spi) {
		Q2SPI_DEBUG(q2spi, "%s Err q2spi is NULL, PID=%d\n", __func__, current->pid);
		return -EINVAL;
	}
	Q2SPI_INFO(q2spi, "%s PID=%d\n", __func__, current->pid);
	if (!atomic_read(&q2spi->is_suspend)) {
		Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid);
		q2spi_geni_resources_on(q2spi);
		q2spi_put_slave_to_sleep(q2spi);
		/* Delay to ensure any pending CRs in progress are consumed */
		usleep_range(10000, 20000);
		q2spi_tx_queue_status(q2spi);
		q2spi_unmap_doorbell_rx_buf(q2spi);
		Q2SPI_DEBUG(q2spi, "%s Sending disconnect doorbell cmd\n", __func__);
		geni_gsi_disconnect_doorbell_stop_ch(q2spi->gsi->tx_c, true);
		ret = irq_set_irq_wake(q2spi->doorbell_irq, 1);
		if (unlikely(ret))
			Q2SPI_ERROR(q2spi, "%s Err Failed to set IRQ wake\n", __func__);
		q2spi_geni_resources_off(q2spi);
		atomic_set(&q2spi->is_suspend, 1);
		if (!ret)
			enable_irq(q2spi->doorbell_irq);
		else
			Q2SPI_ERROR(q2spi, "%s Err Failed to enable_irq\n", __func__);
	}
	return ret;
}
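
/**
 * q2spi_geni_runtime_resume - runtime PM resume callback
 *
 * @dev: pointer to q2spi device
 *
 * Mirror of runtime suspend: turns the GENI resources back on, disarms
 * the doorbell wake IRQ, restarts the GSI channel and remaps the
 * doorbell RX buffer.
 *
 * Return: 0 on success, negative error code otherwise.
 */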
static int q2spi_geni_runtime_resume(struct device *dev)
{
	struct q2spi_geni *q2spi = NULL;
	int ret = 0;

	if (q2spi_sys_restart)
		return -ERESTARTSYS;
	q2spi = get_q2spi(dev);
	if (!q2spi) {
		Q2SPI_DEBUG(q2spi, "%s Err q2spi is NULL, PID=%d\n", __func__, current->pid);
		return -EINVAL;
	}
	Q2SPI_INFO(q2spi, "%s PID=%d\n", __func__, current->pid);
	if (atomic_read(&q2spi->is_suspend)) {
		Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid);
		if (q2spi_geni_resources_on(q2spi))
			return -EIO;
		disable_irq(q2spi->doorbell_irq);
		ret = irq_set_irq_wake(q2spi->doorbell_irq, 0);
		if (unlikely(ret))
			Q2SPI_ERROR(q2spi, "%s Failed to set IRQ wake\n", __func__);
		Q2SPI_DEBUG(q2spi, "%s Sending start channel\n", __func__);
		geni_gsi_ch_start(q2spi->gsi->tx_c);
		/* Clear is_suspend to map doorbell buffers */
		atomic_set(&q2spi->is_suspend, 0);
		ret = q2spi_map_doorbell_rx_buf(q2spi);
		Q2SPI_DEBUG(q2spi, "%s End ret:%d\n", __func__, ret);
	}
	return ret;
}
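
/**
 * q2spi_geni_resume - system PM resume callback
 *
 * @dev: pointer to q2spi device
 *
 * Only logs the current PM state; the actual restore work is done in
 * the runtime PM resume path.
 *
 * Return: 0 always.
 */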
static int q2spi_geni_resume(struct device *dev)
{
	struct q2spi_geni *q2spi = get_q2spi(dev);

	if (!q2spi)
		return 0;
	Q2SPI_INFO(q2spi, "%s PID=%d\n", __func__, current->pid);
	Q2SPI_DEBUG(q2spi, "%s PM state:%d is_suspend:%d pm_enable:%d\n", __func__,
		    pm_runtime_status_suspended(dev), atomic_read(&q2spi->is_suspend),
		    pm_runtime_enabled(dev));
	return 0;
}
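
/**
 * q2spi_geni_suspend - system PM suspend callback
 *
 * @dev: pointer to q2spi device
 *
 * If the device is not already runtime suspended, forces a runtime
 * suspend and re-syncs the runtime PM status to suspended.
 *
 * Return: 0 on success, negative error code otherwise.
 */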
static int q2spi_geni_suspend(struct device *dev)
{
	struct q2spi_geni *q2spi = get_q2spi(dev);
	int ret = 0;

	if (!q2spi)
		return 0;
	Q2SPI_INFO(q2spi, "%s PID=%d\n", __func__, current->pid);
	Q2SPI_DEBUG(q2spi, "%s PM state:%d is_suspend:%d pm_enable:%d\n", __func__,
		    pm_runtime_status_suspended(dev), atomic_read(&q2spi->is_suspend),
		    pm_runtime_enabled(dev));
	if (pm_runtime_status_suspended(dev)) {
		Q2SPI_DEBUG(q2spi, "%s: suspended state\n", __func__);
		return ret;
	}
	if (!atomic_read(&q2spi->is_suspend)) {
		Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid);
		ret = q2spi_geni_runtime_suspend(dev);
		if (ret) {
			Q2SPI_DEBUG(q2spi, "%s: Err runtime_suspend fail\n", __func__);
		} else {
			pm_runtime_disable(dev);
			pm_runtime_set_suspended(dev);
			pm_runtime_enable(dev);
		}
	}
	return ret;
}

static const struct dev_pm_ops q2spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(q2spi_geni_runtime_suspend,
			   q2spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(q2spi_geni_suspend, q2spi_geni_resume)
};

static const struct of_device_id q2spi_geni_dt_match[] = {
	{ .compatible = "qcom,q2spi-msm-geni" },
	{}
};
MODULE_DEVICE_TABLE(of, q2spi_geni_dt_match);

static struct platform_driver q2spi_geni_driver = {
	.probe = q2spi_geni_probe,
	.remove = q2spi_geni_remove,
	.shutdown = q2spi_geni_shutdown,
	.driver = {
		.name = "q2spi_msm_geni",
		.pm = &q2spi_geni_pm_ops,
		.of_match_table = q2spi_geni_dt_match,
	},
};
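
/*
 * Module entry/exit: register and unregister the platform driver; probe
 * runs once a matching "qcom,q2spi-msm-geni" device tree node binds.
 */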
static int __init q2spi_dev_init(void)
{
	int ret = 0;

	ret = platform_driver_register(&q2spi_geni_driver);
	if (ret)
		pr_err("register platform driver failed, ret [%d]\n", ret);
	pr_info("%s end ret:%d\n", __func__, ret);
	return ret;
}

static void __exit q2spi_dev_exit(void)
{
	pr_info("%s PID=%d\n", __func__, current->pid);
	platform_driver_unregister(&q2spi_geni_driver);
}

module_param(q2spi_max_speed, uint, 0644);
MODULE_PARM_DESC(q2spi_max_speed, "Maximum Q2SPI bus speed in Hz; overrides the q2spi-max-frequency DT property");
module_init(q2spi_dev_init);
module_exit(q2spi_dev_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:q2spi_geni");