spi.c

// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

#define spi_pcpu_stats_totalize(ret, in, field) \
do { \
	int i; \
	ret = 0; \
	for_each_possible_cpu(i) { \
		const struct spi_statistics *pcpu_stats; \
		u64 inc; \
		unsigned int start; \
		pcpu_stats = per_cpu_ptr(in, i); \
		do { \
			start = u64_stats_fetch_begin_irq( \
					&pcpu_stats->syncp); \
			inc = u64_stats_read(&pcpu_stats->field); \
		} while (u64_stats_fetch_retry_irq( \
					&pcpu_stats->syncp, start)); \
		ret += inc; \
	} \
} while (0)
#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
					     struct device_attribute *attr, \
					     char *buf) \
{ \
	struct spi_controller *ctlr = container_of(dev, \
					 struct spi_controller, dev); \
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_controller_##field = { \
	.attr = { .name = file, .mode = 0444 }, \
	.show = spi_controller_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
					 struct device_attribute *attr, \
					 char *buf) \
{ \
	struct spi_device *spi = to_spi_device(dev); \
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
	.attr = { .name = file, .mode = 0444 }, \
	.show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf) \
{ \
	ssize_t len; \
	u64 val; \
	spi_pcpu_stats_totalize(val, stat, field); \
	len = sysfs_emit(buf, "%llu\n", val); \
	return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field) \
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
				 field)
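
/*
 * Illustration: a single invocation such as SPI_STATISTICS_SHOW(messages)
 * below expands, via SPI_STATISTICS_SHOW_NAME and SPI_STATISTICS_ATTRS,
 * into roughly:
 *
 *	static ssize_t spi_statistics_messages_show(...);
 *	static ssize_t spi_controller_messages_show(...);
 *	static struct device_attribute dev_attr_spi_controller_messages;
 *	static ssize_t spi_device_messages_show(...);
 *	static struct device_attribute dev_attr_spi_device_messages;
 *
 * i.e. one read-only sysfs "messages" file for the controller and one per
 * device, each summing the per-CPU counters with spi_pcpu_stats_totalize().
 */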
SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
				 "transfer_bytes_histo_" number, \
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}
/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
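
/*
 * Illustrative sketch of the registration path above. A minimal client
 * driver (names here are hypothetical) normally reaches
 * __spi_register_driver() through the module_spi_driver() convenience
 * macro rather than calling it directly:
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver		= { .name = "foo-chip" },
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 *
 * Supplying the spi_device_id table alongside any OF compatible strings
 * avoids the pr_warn() above and keeps module autoloading working.
 */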
/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into board-specific code like
 * arch/.../mach.../board-YYY.c, along with other readonly (flashable)
 * information about mainboard devices.
 */
struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info and
 * spi_controller lists, and the matching between them; also used
 * to protect the spi_master_idr idr.
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
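
/*
 * Illustrative usage of the alloc/add pair above (values hypothetical):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strscpy(spi->modalias, "foo-chip", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard without registering
 *		return -ENODEV;
 *	}
 *
 * On failure the caller drops its reference with spi_dev_put(), as the
 * spi_alloc_device() kernel-doc above requires.
 */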
static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */
	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
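
/*
 * Illustrative board registration (hypothetical board code), typically
 * run from an arch_initcall as the kernel-doc above describes:
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "foo-chip",
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.max_speed_hz	= 2000000,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devs,
 *				ARRAY_SIZE(board_spi_devs));
 *
 * The table is copied, so __initdata is safe; embedded pointers such as
 * platform_data must outlive init, as noted above.
 */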
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi: the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size: size to alloc and return
 * @gfp: GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res: the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr: the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/
static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
		       (!enable && spi->controller->last_cs != spi->chip_select)) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi->chip_select : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
		spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has no means of expressing the GPIO
			 * polarity, and thus the SPISerialBus() resource defines it
			 * on a per-chip basis. In order to avoid a chain of
			 * negations, the GPIO polarity is considered to be Active
			 * High. Even for the cases when _DSD() is involved (in the
			 * updated versions of ACPI) the GPIO CS polarity must be
			 * defined Active High to avoid ambiguity. That's why we use
			 * enable, which takes SPI_CS_HIGH into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi->cs_gpiod, activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}
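
/*
 * Sketch of the net effect of the inversions above, derived from the code
 * (set_cs() receives the desired logic level of the CS line):
 *
 *	activate  SPI_CS_HIGH	set_cs() arg	line state
 *	true	  0		false		low  (asserted)
 *	true	  1		true		high (asserted)
 *	false	  0		true		high (idle)
 *	false	  1		false		low  (idle)
 *
 * The GPIO path instead passes the logical value ("activate") and lets
 * gpiolib apply the polarity, except under ACPI, where the descriptor is
 * treated as active-high and the physical level (!enable) is used.
 */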
#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (sgt->orig_nents) {
		dma_unmap_sgtable(dev, sgt, dir, attrs);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						&xfer->tx_sg, DMA_TO_DEVICE,
						attrs);
				return ret;
			}
		}
	}

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}

#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf and rx_buf to NULL if they were pointed
		 * at the controller's dummy buffers.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}
static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}
static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * apply the MSEC_PER_SEC multiplier before the division,
		 * otherwise short transfers would round down to 0.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Double the computed time and add 200 ms tolerance; use
		 * the predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;
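		/*
		 * Worked example (illustrative, not from the original
		 * source): a 4096-byte transfer at 1 MHz yields
		 * 8 * 1000 * 4096 / 1000000 = 32 ms, so the timeout
		 * becomes 2 * 32 + 200 = 264 ms. For very short
		 * transfers the 200 ms tolerance dominates.
		 */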
		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}
int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it
		 * conservatively with half of the requested speed.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
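/*
 * Example (illustrative sketch, not part of the original file): a device
 * driver that needs a 3-microsecond pause between command and data
 * phases could use spi_delay_exec() directly:
 *
 *	struct spi_delay d = {
 *		.value = 3,
 *		.unit  = SPI_DELAY_UNIT_USECS,
 *	};
 *
 *	ret = spi_delay_exec(&d, NULL);	// no transfer needed for USECS/NSECS
 *
 * SPI_DELAY_UNIT_SCK requires a non-NULL transfer so that the clock rate
 * can be looked up; otherwise spi_delay_to_ns() returns -EINVAL.
 */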
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %uus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
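/*
 * Example (illustrative sketch, not part of the original file): a driver
 * whose transfer_one() returns 1 to indicate an asynchronous transfer
 * typically calls spi_finalize_current_transfer() from its completion
 * interrupt. foo_spi_irq() and foo_clear_irq() are hypothetical names:
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		foo_clear_irq(ctlr);			// hypothetical helper
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */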
static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}
static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	ret = spi_split_transfers_maxsize(ctlr, msg,
					  spi_max_transfer_size(msg->spi),
					  GFP_KERNEL | GFP_DMA);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers
	 * will do this in the calling context, but some don't. For those
	 * cases, a completion is used to guarantee that this function does
	 * not return until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags make it possible to opportunistically skip
	 * the completion, since its use involves expensive spin locks. In
	 * case of a race with the context that calls
	 * spi_finalize_current_message(), the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}
/**
 * __spi_pump_messages - function which processes SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Take the IO mutex */
	mutex_lock(&ctlr->io_mutex);

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg)
		goto out_unlock;

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy)
			goto out_unlock;

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				ctlr->queue_empty = true;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			goto out_unlock;
		}

		ctlr->busy = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->queue_empty = true;
		goto out_unlock;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
	return;

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}
/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer. The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call. The
 * timestamp is only taken once, at the first such call. It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer. Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
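/*
 * Example (illustrative sketch, not part of the original file): a PIO
 * driver bracketing each word with the pre/post helpers. The variables
 * i, w_size and tx_words, and foo_write_word(), are hypothetical:
 *
 *	for (i = 0; i < xfer->len / w_size; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_word(ctlr, tx_words[i]);	// hypothetical helper
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 *
 * Only the word selected via ptp_sts_word_pre/post is actually
 * timestamped; all other iterations return early.
 */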
/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller. If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}
static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;
	ctlr->queue_empty = true;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* Get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
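/*
 * Example (illustrative sketch, not part of the original file): a driver
 * implementing its own message pump can peek at the queue after finishing
 * the current message. foo_kick_hardware() is a hypothetical helper:
 *
 *	struct spi_message *next = spi_get_next_queued_message(ctlr);
 *
 *	if (next)
 *		foo_kick_hardware(ctlr, next);	// hypothetical helper
 */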
/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	int ret;

	mesg = ctlr->cur_msg;

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/*
	 * In the prepare_message() callback the SPI bus has the opportunity
	 * to split a transfer to smaller chunks.
	 *
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
	 */
	spi_res_release(ctlr, mesg);

	if (mesg->prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	mesg->prepared = false;

	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
	smp_mb(); /* See __spi_pump_transfer_message()... */
	if (READ_ONCE(ctlr->cur_msg_need_completion))
		complete(&ctlr->cur_msg_completion);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
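/*
 * Example (illustrative sketch, not part of the original file): a driver
 * providing its own transfer_one_message() must finalize each message when
 * done. foo_transfer_one_message() and foo_do_transfers() are hypothetical:
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		int ret = foo_do_transfers(ctlr, msg);	// hypothetical helper
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */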
static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	return 0;
}
static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret)
		dev_warn(&ctlr->dev, "could not stop message queue\n");
	return ret;
}
static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_destroy_worker(ctlr->kworker);

	return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	ctlr->queue_empty = false;
	if (!ctlr->busy && need_pump)
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: SPI device which is requesting transfer
 * @msg: SPI message which is to be queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}
/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
 * context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. It is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 0:
			spi->mode |= SPI_NO_TX;
			break;
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_TX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 0:
			spi->mode |= SPI_NO_RX;
			break;
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_RX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (!of_node_name_eq(nc, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
		spi->max_speed_hz = value;

	return 0;
}
static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;
	spi->dev.fwnode = of_fwnode_handle(nc);

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
/**
 * spi_new_ancillary_device() - Register ancillary SPI device
 * @spi:         Pointer to the main SPI device registering the ancillary device
 * @chip_select: Chip Select of the ancillary device
 *
 * Register an ancillary SPI device; for example some chips have a chip-select
 * for normal device usage and another one for setup/firmware upload.
 *
 * This may only be called from main SPI device's probe routine.
 *
 * Return: a pointer to the new ancillary device, or ERR_PTR on failure.
 */
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
					    u8 chip_select)
{
	struct spi_device *ancillary;
	int rc = 0;

	/* Alloc an spi_device */
	ancillary = spi_alloc_device(spi->controller);
	if (!ancillary) {
		rc = -ENOMEM;
		goto err_out;
	}

	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));

	/* Use provided chip-select for ancillary device */
	ancillary->chip_select = chip_select;

	/* Take over SPI mode/speed from SPI main device */
	ancillary->max_speed_hz = spi->max_speed_hz;
	ancillary->mode = spi->mode;

	/* Register the new device */
	rc = spi_add_device_locked(ancillary);
	if (rc) {
		dev_err(&spi->dev, "failed to register ancillary device\n");
		goto err_out;
	}

	return ancillary;

err_out:
	spi_dev_put(ancillary);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
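/*
 * Example (illustrative sketch, not part of the original file): a probe
 * routine registering a second chip select for firmware upload:
 *
 *	ancillary = spi_new_ancillary_device(spi, 1);
 *	if (IS_ERR(ancillary))
 *		return PTR_ERR(ancillary);
 *
 * The ancillary device inherits the main device's mode and max speed.
 */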
#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
	struct spi_controller 	*ctlr;
	u32			max_speed_hz;
	u32			mode;
	int			irq;
	u8			bits_per_word;
	u8			chip_select;
	int			n;
	int			index;
};

static int acpi_spi_count(struct acpi_resource *ares, void *data)
{
	struct acpi_resource_spi_serialbus *sb;
	int *count = data;

	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
		return 1;

	sb = &ares->data.spi_serial_bus;
	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
		return 1;

	*count = *count + 1;

	return 1;
}

/**
 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
 * @adev:	ACPI device
 *
 * Return: the number of SpiSerialBus resources in the ACPI device's
 * resource list, or a negative error code.
 */
int acpi_spi_count_resources(struct acpi_device *adev)
{
	LIST_HEAD(r);
	int count = 0;
	int ret;

	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
	if (ret < 0)
		return ret;

	acpi_dev_free_resource_list(&r);

	return count;
}
EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
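/*
 * Example (illustrative sketch, not part of the original file): count the
 * SPI connections of an ACPI device before allocating per-connection state:
 *
 *	int n = acpi_spi_count_resources(adev);
 *
 *	if (n <= 0)
 *		return n ? n : -ENODEV;
 */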
static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
					    struct acpi_spi_lookup *lookup)
{
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPHA;
}
static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);

static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct acpi_spi_lookup *lookup = data;
	struct spi_controller *ctlr = lookup->ctlr;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;
		acpi_handle parent_handle;
		acpi_status status;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {

			if (lookup->index != -1 && lookup->n++ != lookup->index)
				return 1;

			status = acpi_get_handle(NULL,
						 sb->resource_source.string_ptr,
						 &parent_handle);

			if (ACPI_FAILURE(status))
				return -ENODEV;

			if (ctlr) {
				if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
					return -ENODEV;
			} else {
				struct acpi_device *adev;

				adev = acpi_fetch_acpi_dev(parent_handle);
				if (!adev)
					return -ENODEV;

				ctlr = acpi_spi_find_controller_by_adev(adev);
				if (!ctlr)
					return -EPROBE_DEFER;

				lookup->ctlr = ctlr;
			}

			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				lookup->chip_select = cs;
			} else {
				lookup->chip_select = sb->device_selection;
			}

			lookup->max_speed_hz = sb->connection_speed;
			lookup->bits_per_word = sb->data_bit_length;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				lookup->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				lookup->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				lookup->mode |= SPI_CS_HIGH;
		}
	} else if (lookup->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			lookup->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
/**
 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
 * @ctlr: controller to which the spi device belongs
 * @adev: ACPI Device for the spi device
 * @index: Index of the spi resource inside the ACPI Node
 *
 * This should be used to allocate a new SPI device from an ACPI Node.
 * The caller is responsible for calling spi_add_device to register the SPI device.
 *
 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
 * using the resource.
 * If index is set to -1, index is not used.
 * Note: If index is -1, ctlr must be set.
 *
 * Return: a pointer to the new device, or ERR_PTR on error.
 */
struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
					 struct acpi_device *adev,
					 int index)
{
	acpi_handle parent_handle = NULL;
	struct list_head resource_list;
	struct acpi_spi_lookup lookup = {};
	struct spi_device *spi;
	int ret;

	if (!ctlr && index == -1)
		return ERR_PTR(-EINVAL);

	lookup.ctlr		= ctlr;
	lookup.irq		= -1;
	lookup.index		= index;
	lookup.n		= 0;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, &lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0)
		/* Found SPI in _CRS but it points to another controller */
		return ERR_PTR(ret);

	if (!lookup.max_speed_hz &&
	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
	    ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
		/* Apple does not use _CRS but nested devices for SPI slaves */
		acpi_spi_parse_apple_properties(adev, &lookup);
	}

	if (!lookup.max_speed_hz)
		return ERR_PTR(-ENODEV);

	spi = spi_alloc_device(lookup.ctlr);
	if (!spi) {
		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return ERR_PTR(-ENOMEM);
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->max_speed_hz	= lookup.max_speed_hz;
	spi->mode		|= lookup.mode;
	spi->irq		= lookup.irq;
	spi->bits_per_word	= lookup.bits_per_word;
	spi->chip_select	= lookup.chip_select;

	return spi;
}
EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
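/*
 * Example (illustrative sketch, not part of the original file): instantiate
 * the second SpiSerialBus resource of an ACPI node and register it:
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 1);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 *
 * Passing ctlr as NULL asks the lookup to resolve the controller from the
 * resource itself, as described above.
 */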
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct spi_device *spi;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = acpi_spi_device_alloc(ctlr, adev, -1);
	if (IS_ERR(spi)) {
		if (PTR_ERR(spi) == -ENOMEM)
			return AE_NO_MEMORY;
		else
			return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	struct spi_controller *ctlr = data;

	if (!adev)
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

#define SPI_ACPI_ENUMERATE_MAX_DEPTH		32

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */
static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);
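/*
 * Example (illustrative sketch, not part of the original file): a slave
 * protocol driver aborting a submitted-but-stalled transfer, e.g. from
 * its remove() path:
 *
 *	ret = spi_slave_abort(spi);
 *	if (ret && ret != -ENOTSUPP)
 *		dev_warn(&spi->dev, "abort failed: %d\n", ret);
 */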
static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_any_child(&ctlr->dev);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_any_child(&ctlr->dev);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strscpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR_RW(slave);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif
/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata(); the memory is cacheline aligned;
 *	drivers granting DMA access to portions of their private data need to
 *	round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers. It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller	*ctlr;
	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	mutex_init(&ctlr->add_lock);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
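/*
 * Example (illustrative sketch, not part of the original file): drivers
 * normally reach this function through the spi_alloc_master() or
 * spi_alloc_slave() wrappers. struct foo_priv is a hypothetical
 * driver-private structure:
 *
 *	struct spi_controller *ctlr;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 */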
static void devm_spi_release_controller(struct device *dev, void *ctlr)
{
	spi_controller_put(*(struct spi_controller **)ctlr);
}

/**
 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
 * @dev: physical device of SPI controller
 * @size: how much zeroed driver-private data to allocate
 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
 * Context: can sleep
 *
 * Allocate an SPI controller and automatically release a reference on it
 * when @dev is unbound from its driver. Drivers are thus relieved from
 * having to call spi_controller_put().
 *
 * The arguments to this function are identical to __spi_alloc_controller().
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
						   unsigned int size,
						   bool slave)
{
	struct spi_controller **ptr, *ctlr;

	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	ctlr = __spi_alloc_controller(dev, size, slave);
	if (ctlr) {
		ctlr->devm_allocated = true;
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ctlr;
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
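/*
 * Example (illustrative sketch, not part of the original file): the devm
 * variant ties the controller's lifetime to the probing device, so no
 * spi_controller_put() is needed on later error paths. struct foo_priv
 * is a hypothetical driver-private structure:
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 */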
/**
 * spi_get_gpio_descs() - grab chip select GPIOs for the master
 * @ctlr: The SPI master to grab GPIO descriptors for
 */
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
	int nb, i;
	struct gpio_desc **cs;
	struct device *dev = &ctlr->dev;
	unsigned long native_cs_mask = 0;
	unsigned int num_cs_gpios = 0;

	nb = gpiod_count(dev, "cs");
	if (nb < 0) {
		/* No GPIOs at all is fine, else return the error */
		if (nb == -ENOENT)
			return 0;
		return nb;
	}

	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
			  GFP_KERNEL);
	if (!cs)
		return -ENOMEM;
	ctlr->cs_gpiods = cs;

	for (i = 0; i < nb; i++) {
		/*
		 * Most chipselects are active low, the inverted
		 * semantics are handled by special quirks in gpiolib,
		 * so initializing them GPIOD_OUT_LOW here means
		 * "unasserted", in most cases this will drive the physical
		 * line high.
		 */
		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
						      GPIOD_OUT_LOW);
		if (IS_ERR(cs[i]))
			return PTR_ERR(cs[i]);

		if (cs[i]) {
			/*
			 * If we find a CS GPIO, name it after the device and
			 * chip select line.
			 */
			char *gpioname;

			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
						  dev_name(dev), i);
			if (!gpioname)
				return -ENOMEM;
			gpiod_set_consumer_name(cs[i], gpioname);
			num_cs_gpios++;
			continue;
		}

		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
			dev_err(dev, "Invalid native chip select %d\n", i);
			return -EINVAL;
		}
		native_cs_mask |= BIT(i);
	}

	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;

	if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
		dev_err(dev, "No unused native chip select available\n");
		return -EINVAL;
	}

	return 0;
}
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
	/*
	 * The controller may implement only the high-level SPI-memory like
	 * operations if it does not support regular SPI transfers, and this
	 * is a valid use case.
	 * If ->mem_ops is NULL, we request that at least one of the
	 * ->transfer_xxx() methods be implemented.
	 */
	if (ctlr->mem_ops) {
		if (!ctlr->mem_ops->exec_op)
			return -EINVAL;
	} else if (!ctlr->transfer && !ctlr->transfer_one &&
		   !ctlr->transfer_one_message) {
		return -EINVAL;
	}

	return 0;
}
/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus. The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers. Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep. It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device *dev = ctlr->dev.parent;
	struct boardinfo *bi;
	int status;
	int id, first_dynamic;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
	 */
	status = spi_controller_check_ops(ctlr);
	if (status)
		return status;

	if (ctlr->bus_num >= 0) {
		/* Devices with a fixed bus num must check in with that num */
		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
			       ctlr->bus_num + 1, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id == -ENOSPC ? -EBUSY : id;
		ctlr->bus_num = id;
	} else if (ctlr->dev.of_node) {
		/* Allocate dynamic bus number using Linux idr */
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				       ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	init_completion(&ctlr->cur_msg_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/*
	 * Register the device, then userspace will see it.
	 * Registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);

	if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
		status = spi_get_gpio_descs(ctlr);
		if (status)
			goto free_bus_id;
		/*
		 * A controller using GPIO descriptors always
		 * supports SPI_CS_HIGH if need be.
		 */
		ctlr->mode_bits |= SPI_CS_HIGH;
	}

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (!ctlr->num_chipselect) {
		status = -EINVAL;
		goto free_bus_id;
	}

	/* Setting last_cs to -1 means no chip selected */
	ctlr->last_cs = -1;

	status = device_add(&ctlr->dev);
	if (status < 0)
		goto free_bus_id;
	dev_dbg(dev, "registered %s %s\n",
			spi_controller_is_slave(ctlr) ? "slave" : "master",
			dev_name(&ctlr->dev));

	/*
	 * If we're using a queued driver, start the queue. Note that we don't
	 * need the queueing logic if the driver is only supporting high-level
	 * memory operations.
	 */
	if (ctlr->transfer) {
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			goto free_bus_id;
		}
	}
	/* Add statistics */
	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
	if (!ctlr->pcpu_statistics) {
		dev_err(dev, "Error allocating per-cpu statistics\n");
		status = -ENOMEM;
		goto destroy_queue;
	}

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
	return status;

destroy_queue:
	spi_destroy_queue(ctlr);
free_bus_id:
	mutex_lock(&board_lock);
	idr_remove(&spi_master_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);

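/*
 * Example (illustrative sketch only; the "foo_*" names are hypothetical
 * and error handling is abbreviated): a typical controller driver
 * allocates, fills in and registers its controller from its platform
 * probe():
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 4;
 *		ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *		ctlr->transfer_one = foo_spi_transfer_one;
 *		ctlr->set_cs = foo_spi_set_cs;
 *
 *		return spi_register_controller(ctlr);
 *	}
 */
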
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}

/**
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev: device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register an SPI controller as with spi_register_controller(); the
 * controller will automatically be unregistered and freed when @dev is
 * unbound.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	struct spi_controller **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_controller(ctlr);
	if (!ret) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);

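/*
 * Example (illustrative sketch; the "bar_*" names are hypothetical): with
 * the managed variant, probe() needs no matching remove() to unregister:
 *
 *	static int bar_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev,
 *					     sizeof(struct bar_spi));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 1;
 *		ctlr->transfer_one = bar_spi_transfer_one;
 *
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */
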
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;

	/* Prevent addition of new devices, unregister existing ones */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_lock(&ctlr->add_lock);

	device_for_each_child(&ctlr->dev, NULL, __unregister);

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, id);
	mutex_unlock(&board_lock);
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	device_del(&ctlr->dev);

	/* Free bus id */
	mutex_lock(&board_lock);
	if (found == ctlr)
		idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);

	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_unlock(&ctlr->add_lock);

	/*
	 * Release the last reference on the controller if its driver
	 * has not yet been converted to devm_spi_alloc_master/slave().
	 */
	if (!ctlr->devm_allocated)
		put_device(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);

static inline int __spi_check_suspended(const struct spi_controller *ctlr)
{
	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
}

static inline void __spi_mark_suspended(struct spi_controller *ctlr)
{
	mutex_lock(&ctlr->bus_lock_mutex);
	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
	mutex_unlock(&ctlr->bus_lock_mutex);
}

static inline void __spi_mark_resumed(struct spi_controller *ctlr)
{
	mutex_lock(&ctlr->bus_lock_mutex);
	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
	mutex_unlock(&ctlr->bus_lock_mutex);
}

int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret = 0;

	/* Basically no-ops for non-queued controllers */
	if (ctlr->queued) {
		ret = spi_stop_queue(ctlr);
		if (ret)
			dev_err(&ctlr->dev, "queue stop failed\n");
	}

	__spi_mark_suspended(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret = 0;

	__spi_mark_resumed(ctlr);

	if (ctlr->queued) {
		ret = spi_start_queue(ctlr);
		if (ret)
			dev_err(&ctlr->dev, "queue restart failed\n");
	}
	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);

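/*
 * Example (illustrative sketch; the "baz_*" names are hypothetical): a
 * controller driver typically calls these helpers from its dev_pm_ops so
 * the message queue is quiesced around system suspend:
 *
 *	static int baz_spi_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static int baz_spi_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(baz_spi_pm, baz_spi_suspend,
 *				 baz_spi_resume);
 */
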
/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* Call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* Insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* Remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *			   and register change with spi_message.resources
 * @msg: the spi_message we work upon
 * @xfer_first: the first spi_transfer we want to replace
 * @remove: number of transfers to remove
 * @insert: the number of transfers we want to insert instead
 * @release: extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *		   of struct @spi_transfer)
 * @gfp: gfp flags
 *
 * Return: pointer to @spi_replaced_transfers,
 *	   PTR_ERR(...) in case of errors.
 */
static struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* Allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      struct_size(rxfer, inserted_transfers, insert)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* The release code to invoke before running the generic release */
	rxfer->release = release;

	/* Assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* Init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/*
	 * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* Remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/*
		 * If the entry after replaced_after is msg->transfers,
		 * then we have been requested to remove more transfers
		 * than are in the list.
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* Insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* Free the spi_replace_transfer structure... */
			spi_res_free(rxfer);

			/* ...and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * Remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers.
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/*
	 * Create copies of the given xfer with identical settings
	 * based on the first transfer to get removed.
	 */
	for (i = 0; i < insert; i++) {
		/* We need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* Copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* Add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* Clear cs_change and delay for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay.value = 0;
		}
	}

	/* Set up inserted... */
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* Calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* Create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * Now handle each of those newly inserted spi_transfers.
	 * Note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others),
	 * so we just have to fix up len and the pointers.
	 *
	 * This also includes support for the deprecated
	 * spi_message.is_dma_mapped interface.
	 */

	/*
	 * The first transfer just needs the length modified, so we
	 * run it outside the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* All the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* Update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* Update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* Increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *				 when an individual transfer exceeds a
 *				 certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum length of a transfer; any longer transfer is split
 * @gfp: GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);

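/*
 * Example (illustrative sketch; the "qux_*" name and the 64-byte FIFO
 * limit are hypothetical): a controller whose hardware cannot handle
 * transfers longer than its FIFO might split each message from its
 * prepare_message() callback:
 *
 *	static int qux_spi_prepare_message(struct spi_controller *ctlr,
 *					   struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 64,
 *						   GFP_KERNEL);
 *	}
 */
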
/*-------------------------------------------------------------------------*/

/*
 * Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default. They may likewise need
 * to update clock rates or word sizes from initial values. This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it. When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support. For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status = 0;

	/*
	 * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
	 * being set at the same time.
	 */
	if ((hweight_long(spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
	    (hweight_long(spi->mode &
		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
		dev_err(&spi->dev,
			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
		return -EINVAL;
	}
	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;
	/*
	 * Help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller.
	 * SPI_CS_WORD has a fallback software implementation,
	 * so it is ignored here.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
				 SPI_NO_TX | SPI_NO_RX);
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word) {
		spi->bits_per_word = 8;
	} else {
		/*
		 * Some controllers may not support the default 8 bits-per-word
		 * so only perform the check when this is explicitly provided.
		 */
		status = __spi_validate_bits_per_word(spi->controller,
						      spi->bits_per_word);
		if (status)
			return status;
	}

	if (spi->controller->max_speed_hz &&
	    (!spi->max_speed_hz ||
	     spi->max_speed_hz > spi->controller->max_speed_hz))
		spi->max_speed_hz = spi->controller->max_speed_hz;

	mutex_lock(&spi->controller->io_mutex);

	if (spi->controller->setup) {
		status = spi->controller->setup(spi);
		if (status) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
				status);
			return status;
		}
	}

	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
		if (status < 0) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
				status);
			return status;
		}

		/*
		 * We do not want to return positive value from pm_runtime_get,
		 * there are many instances of devices calling spi_setup() and
		 * checking for a non-zero return value instead of a negative
		 * return value.
		 */
		status = 0;

		spi_set_cs(spi, false, true);
		pm_runtime_mark_last_busy(spi->controller->dev.parent);
		pm_runtime_put_autosuspend(spi->controller->dev.parent);
	} else {
		spi_set_cs(spi, false, true);
	}

	mutex_unlock(&spi->controller->io_mutex);

	if (spi->rt && !spi->controller->rt) {
		spi->controller->rt = true;
		spi_set_thread_rt(spi->controller);
	}

	trace_spi_setup(spi, status);

	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		spi->mode & SPI_MODE_X_MASK,
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);

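/*
 * Example (illustrative sketch; the "mychip_*" names are hypothetical):
 * a protocol driver overrides whatever defaults it must and then calls
 * spi_setup() once from probe(), before queueing any messages:
 *
 *	static int mychip_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1 * 1000 * 1000;
 *
 *		return spi_setup(spi);
 *	}
 */
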
static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
				       struct spi_device *spi)
{
	int delay1, delay2;

	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (delay1 < 0)
		return delay1;

	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
	if (delay2 < 0)
		return delay2;

	if (delay1 < delay2)
		memcpy(&xfer->word_delay, &spi->word_delay,
		       sizeof(xfer->word_delay));

	return 0;
}

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/*
	 * If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
					  spi->cs_gpiod)) {
		size_t maxsize;
		int ret;

		maxsize = (spi->bits_per_word + 7) / 8;

		/* spi_split_transfers_maxsize() requires message->spi */
		message->spi = spi;

		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
						  GFP_KERNEL);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			/* Don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &message->transfers))
				break;
			xfer->cs_change = 1;
		}
	}

	/*
	 * Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * the device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * The SPI transfer length must be a multiple of the SPI word
		 * size, where the word size is rounded up to a power-of-two
		 * number of bytes.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/*
		 * Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (spi->mode & SPI_NO_TX)
				return -EINVAL;
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* Check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (spi->mode & SPI_NO_RX)
				return -EINVAL;
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (IRQs may be blocked, etc)
 *
 * This call may be used from IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);

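/*
 * Example (illustrative sketch; "struct mychip" and its fields are
 * hypothetical, and xfer is assumed to have been filled in beforehand):
 * because the completion callback may run in IRQ context, the message
 * and its buffers must live until the callback fires, e.g. inside the
 * driver's state structure:
 *
 *	struct mychip {
 *		struct spi_device *spi;
 *		struct spi_transfer xfer;
 *		struct spi_message msg;
 *		struct completion done;
 *	};
 *
 *	static void mychip_complete(void *context)
 *	{
 *		struct mychip *chip = context;
 *
 *		// chip->msg.status now holds the result; runs atomically
 *		complete(&chip->done);
 *	}
 *
 *	static int mychip_read_async(struct mychip *chip)
 *	{
 *		spi_message_init_with_transfers(&chip->msg, &chip->xfer, 1);
 *		chip->msg.complete = mychip_complete;
 *		chip->msg.context = chip;
 *
 *		return spi_async(chip->spi, &chip->msg);
 *	}
 */
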
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (IRQs may be blocked, etc)
 *
 * This call may be used from IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}

static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
	bool was_busy;
	int ret;

	mutex_lock(&ctlr->io_mutex);

	was_busy = ctlr->busy;

	ctlr->cur_msg = msg;
	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (ret)
		dev_err(&ctlr->dev, "noqueue transfer failed\n");
	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	if (!was_busy) {
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
	}

	mutex_unlock(&ctlr->io_mutex);
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;

	if (__spi_check_suspended(ctlr)) {
		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
		return -ESHUTDOWN;
	}

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);

	/*
	 * Checking queue_empty here only guarantees async/sync message
	 * ordering when coming from the same context. It does not need to
	 * guard against reentrancy from a different context. The io_mutex
	 * will catch those cases.
	 */
	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
		message->actual_length = 0;
		message->status = -EINPROGRESS;

		trace_spi_message_submit(message);

		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);

		__spi_transfer_message_noqueue(ctlr, message);

		return message->status;
	}

	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until they
	 * are completed.
	 */
	message->complete = spi_complete;
	message->context = &done;
	status = spi_async_locked(spi, message);
	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);

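/*
 * Example (illustrative sketch; the "mychip_xfer" name is hypothetical
 * and the caller's tx/rx buffers are assumed DMA-safe): a simple
 * full-duplex exchange using a stack-allocated message and transfer,
 * which is fine for spi_sync() since the call does not return until the
 * message completes:
 *
 *	static int mychip_xfer(struct spi_device *spi, const void *tx,
 *			       void *rx, size_t len)
 *	{
 *		struct spi_transfer xfer = {
 *			.tx_buf = tx,
 *			.rx_buf = rx,
 *			.len = len,
 *		};
 *		struct spi_message msg;
 *
 *		spi_message_init_with_transfers(&msg, &xfer, 1);
 *		return spi_sync(spi, &msg);
 *	}
 */
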
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* Mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);

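/*
 * Example (illustrative sketch; first_msg/second_msg are hypothetical
 * messages already built by the caller): the canonical pattern for a
 * driver that must issue several messages back to back with no other
 * device's traffic in between:
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->controller);
 */
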
/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the pre-allocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it is a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);

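/*
 * Example (illustrative sketch; the register layout and "read" bit are
 * hypothetical): reading one register of a chip that expects a one-byte
 * command followed by a one-byte reply:
 *
 *	static int mychip_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		u8 cmd = reg | 0x80;	// hypothetical "read" bit
 *
 *		return spi_write_then_read(spi, &cmd, 1, val, 1);
 *	}
 */
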
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* The spi controllers are not on the spi_bus, so we find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		/*
		 * Clear the flag before adding the device so that fw_devlink
		 * doesn't skip adding consumers to this device.
		 */
		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* No? not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);