sdhci.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by Vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
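
/*
 * Illustrative sketch (not part of this file; names are hypothetical): a
 * vendor driver that knows its controller supports v4 mode would typically
 * enable it from probe, before registering the host:
 *
 *	static int foo_sdhci_probe(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host = sdhci_pltfm_init(pdev, &foo_pdata, 0);
 *
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *		sdhci_enable_v4_mode(host);	\/\* before sdhci_add_host() \*\/
 *		return sdhci_add_host(host);
 *	}
 */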

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(mmc_dev(host->mmc));
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(mmc_dev(host->mmc));
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
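
/*
 * Note on the poll above: with a 100 ms budget and a 10 us delay per
 * iteration, the loop reads SDHCI_SOFTWARE_RESET at most roughly
 * 100,000 us / 10 us = 10,000 times before declaring the reset failed.
 */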

static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return false;
	}

	host->ops->reset(host, mask);

	return true;
}

static void sdhci_reset_for_all(struct sdhci_host *host)
{
	if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}
		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

enum sdhci_reset_reason {
	SDHCI_RESET_FOR_INIT,
	SDHCI_RESET_FOR_REQUEST_ERROR,
	SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
	SDHCI_RESET_FOR_TUNING_ABORT,
	SDHCI_RESET_FOR_CARD_REMOVED,
	SDHCI_RESET_FOR_CQE_RECOVERY,
};

static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
{
	if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) {
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		return;
	}

	switch (reason) {
	case SDHCI_RESET_FOR_INIT:
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		break;
	case SDHCI_RESET_FOR_REQUEST_ERROR:
	case SDHCI_RESET_FOR_TUNING_ABORT:
	case SDHCI_RESET_FOR_CARD_REMOVED:
	case SDHCI_RESET_FOR_CQE_RECOVERY:
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
		break;
	case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
		sdhci_do_reset(host, SDHCI_RESET_DATA);
		break;
	}
}

#define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)
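
/*
 * Usage example for the macro above: sdhci_reset_for(host, INIT) expands to
 * sdhci_reset_for_reason(host, SDHCI_RESET_FOR_INIT), which, per the switch
 * above, resets the CMD and DATA circuits with a single combined write.
 */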

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (soft)
		sdhci_reset_for(host, INIT);
	else
		sdhci_reset_for_all(host);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_set_default_irqs(host);
	spin_unlock_irqrestore(&host->lock, flags);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		host->reinit_uhs = true;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present state,
	 * refer sdhci_set_card_detection(). A card detect interrupt might have
	 * been missed while the host controller was being reset, so trigger a
	 * rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
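
/*
 * Worked example for the two PIO helpers above: the data port
 * (SDHCI_BUFFER) is a 32-bit register, so bytes move four at a time,
 * least significant byte first. Writing bytes b0, b1, b2, b3 accumulates
 * scratch = b0 | b1 << 8 | b2 << 16 | b3 << 24 before the single
 * sdhci_writel(); the read side unpacks in the same order by shifting
 * scratch right by 8 after each byte.
 */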

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			if (host->ops->copy_to_bounce_buffer) {
				host->ops->copy_to_bounce_buffer(host,
								 data, length);
			} else {
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buffer, length);
			}
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(mmc_dev(host->mmc),
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
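
/*
 * Illustrative example for sdhci_adma_write_desc(): a 64-bit ADMA2 transfer
 * descriptor covering 512 bytes at DMA address 0x123456000 would be written
 * with (all fields little-endian):
 *
 *	cmd     = ADMA2_TRAN_VALID
 *	len     = 512
 *	addr_lo = 0x23456000
 *	addr_hi = 0x00000001
 *
 * In 32-bit mode addr_hi is never written, and *desc advances by the
 * correspondingly smaller host->desc_sz.
 */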

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		/*
		 * The block layer forces a minimum segment size of PAGE_SIZE,
		 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
		 * multiple descriptors, noting that the ADMA table is sized
		 * for 4KiB chunks anyway, so it will be big enough.
		 */
		while (len > host->max_adma) {
			int n = 32 * 1024; /* 32 KiB */

			__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
			addr += n;
			len -= n;
		}

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
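
/*
 * Worked example for the alignment fix-up above, assuming the usual 4-byte
 * ADMA2 alignment (SDHCI_ADMA2_ALIGN == 4, SDHCI_ADMA2_MASK == 3): for a
 * segment at DMA address 0x1002, offset = (4 - (0x1002 & 3)) & 3 = 2, so
 * the first two bytes go through the align buffer in their own descriptor
 * and the next descriptor starts at the 32-bit-aligned address 0x1004.
 */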

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}
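
/*
 * Worked example for sdhci_target_timeout(): with data->timeout_ns =
 * 100,000,000 (100 ms), data->timeout_clks = 1000 and host->clock =
 * 50,000,000 Hz, the base term is 100,000 us and the clock term adds
 * 1,000,000 * 1000 / 50,000,000 = 20 us, giving a target of 100,020 us.
 */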

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = false;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use the maximum. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return host->max_timeout_count;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return host->max_timeout_count;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return host->max_timeout_count;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count > host->max_timeout_count) {
			if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
				DBG("Too large timeout 0x%x requested for CMD%d!\n",
				    count, cmd->opcode);
			count = host->max_timeout_count;
			*too_big = true;
			break;
		}
	}

	return count;
}
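
/*
 * Worked example for the loop above, assuming host->timeout_clk is in kHz:
 * with timeout_clk = 50,000 (50 MHz), the base timeout is
 * (1 << 13) * 1000 / 50,000 = 163 us, and each increment of 'count'
 * doubles it. A 500,000 us target therefore needs count = 12, since
 * 163 << 12 = 667,648 us is the first value that reaches it.
 */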

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);

void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	bool too_big = false;
	u8 count = sdhci_calc_timeout(host, cmd, &too_big);

	if (too_big &&
	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
		sdhci_calc_sw_timeout(host, cmd);
		sdhci_set_data_timeout_irq(host, false);
	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
		sdhci_set_data_timeout_irq(host, true);
	}

	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
EXPORT_SYMBOL_GPL(__sdhci_set_timeout);

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout)
		host->ops->set_timeout(host, cmd);
	else
		__sdhci_set_timeout(host, cmd);
}

static void sdhci_initialize_data(struct sdhci_host *host,
				  struct mmc_data *data)
{
	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;
}

static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary value and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}
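
/*
 * Background note (per the SDHCI v4.10 spec): in v4 mode the 32-bit block
 * count is programmed through the register at offset 0x00, which v4 mode
 * repurposes from the SDMA system address (SDHCI_32BIT_BLK_CNT aliases
 * that offset), and that is why the legacy 16-bit SDHCI_BLOCK_COUNT
 * register must be cleared to zero first.
 */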

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	sdhci_config_dma(host);

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}
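
/*
 * Summary of the decision flow above: a request starts on the DMA path
 * whenever SDMA or ADMA is available, falls back to PIO if any segment
 * trips the controller's length/offset alignment quirks or if mapping
 * fails, and only the surviving path has its interrupts enabled by
 * sdhci_set_transfer_irqs().
 */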
#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)

static int sdhci_external_dma_init(struct sdhci_host *host)
{
	int ret = 0;
	struct mmc_host *mmc = host->mmc;

	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
	if (IS_ERR(host->tx_chan)) {
		ret = PTR_ERR(host->tx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request TX DMA channel.\n");
		host->tx_chan = NULL;
		return ret;
	}

	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
	if (IS_ERR(host->rx_chan)) {
		if (host->tx_chan) {
			dma_release_channel(host->tx_chan);
			host->tx_chan = NULL;
		}

		ret = PTR_ERR(host->rx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request RX DMA channel.\n");
		host->rx_chan = NULL;
	}

	return ret;
}

static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
						   struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static int sdhci_external_dma_setup(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	int ret, i;
	enum dma_transfer_direction dir;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = cmd->data;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int sg_cnt;

	if (!host->mapbase)
		return -EINVAL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		if ((data->sg + i)->length % data->blksz)
			return -EINVAL;
	}

	chan = sdhci_external_dma_channel(host, data);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_cnt <= 0)
		return -EINVAL;

	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		ret = cookie;

	return ret;
}

static void sdhci_external_dma_release(struct sdhci_host *host)
{
	if (host->tx_chan) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
	}

	if (host->rx_chan) {
		dma_release_channel(host->rx_chan);
		host->rx_chan = NULL;
	}

	sdhci_switch_external_dma(host, false);
}
static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
					      struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	host->flags |= SDHCI_REQ_USE_DMA;
	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	if (!sdhci_external_dma_setup(host, cmd)) {
		__sdhci_external_dma_prepare_data(host, cmd);
	} else {
		sdhci_external_dma_release(host);
		pr_err("%s: Cannot use external DMA, falling back to the standard SDHCI DMA/PIO path.\n",
		       mmc_hostname(host->mmc));
		sdhci_prepare_data(host, cmd);
	}
}
static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	struct dma_chan *chan;

	if (!cmd->data)
		return;

	chan = sdhci_external_dma_channel(host, cmd->data);
	if (chan)
		dma_async_issue_pending(chan);
}

#else

static inline int sdhci_external_dma_init(struct sdhci_host *host)
{
	return -EOPNOTSUPP;
}

static inline void sdhci_external_dma_release(struct sdhci_host *host)
{
}

static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
	/* This should never happen */
	WARN_ON_ONCE(1);
}

static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
}

static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
							  struct mmc_data *data)
{
	return NULL;
}

#endif

void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
{
	host->use_external_dma = en;
}
EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
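
/*
 * Auto-CMD helpers: the controller can issue CMD12 (stop transmission) or
 * CMD23 (set block count) on the host's behalf. The predicates below decide
 * which variant, if any, applies to a given request.
 */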
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
}

static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
				      struct mmc_request *mrq)
{
	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
	 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}
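
/*
 * Compose the SDHCI_TRANSFER_MODE register for a command: block-count
 * enable and multi-block select for multi-block transfers, an Auto-CMD
 * variant where applicable, the read-direction bit, and DMA enable.
 */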
static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (sdhci_auto_cmd23(host, cmd->mrq))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);
}
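
/*
 * Tear down the per-request state: clear any host pointers still referring
 * to this mrq, flag a pending reset if the request ended in error, mark the
 * mrq done and stop its timer. The LED is released once no requests remain.
 */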
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
		host->deferred_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	sdhci_set_mrq_done(host, mrq);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_reset_for(host, REQUEST_ERROR);
		else
			sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
	     data->error)) {
		/*
		 * A 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is the upper
		 * layer's responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			if (!sdhci_send_command(host, data->stop)) {
				if (sw_data_timeout) {
					/*
					 * This is anyway a sw data timeout, so
					 * give up now.
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}
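
/*
 * Issue a command to the controller. Returns false without doing anything
 * if the relevant inhibit bits are still set in SDHCI_PRESENT_STATE; the
 * caller is then expected to retry (see sdhci_send_command_retry()) or to
 * defer the command.
 */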
static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}

static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}
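
/*
 * Read back a 136-bit (R2) response. The controller strips the CRC7 and
 * end bit, so the remaining 120 bits sit 8 bits lower than the mmc core
 * expects: each word is shifted left by one byte and the top byte of the
 * following word is pulled in to compensate.
 */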
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 * feature so there might be some problems with older
	 * controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
			WARN_ON(host->deferred_cmd);
			host->deferred_cmd = cmd->mrq->cmd;
		}
	} else {
		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
		break;
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}

	return preset;
}
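
/*
 * Work out the divider setting for a requested clock. The resulting SDCLK
 * is (max_clk * clk_mul) / real_div, where clk_mul is 1 unless Programmable
 * Clock Mode is used. Illustrative example (not from the spec): with
 * max_clk = 200 MHz, no clock multiplier and a 25 MHz request, the v3.00
 * loop settles on real_div = 8, i.e. exactly 25 MHz, and the register
 * divider field is programmed with real_div >> 1 = 4.
 */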
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
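
/*
 * Enable the clock in two stages: start the internal clock and poll (for up
 * to 150 ms) until it reports stable, then, on v4.10 hosts in v4 mode, do
 * the same for the PLL before finally gating the clock through to the card.
 */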
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 150 ms */
	timeout = ktime_add_ms(ktime_get(), 150);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
		clk |= SDHCI_CLOCK_PLL_EN;
		clk &= ~SDHCI_CLOCK_INT_STABLE;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Wait max 150 ms */
		timeout = ktime_add_ms(ktime_get(), 150);
		while (1) {
			bool timedout = ktime_after(ktime_get(), timeout);

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			if (clk & SDHCI_CLOCK_INT_STABLE)
				break;
			if (timedout) {
				pr_err("%s: PLL clock never stabilised.\n",
				       mmc_hostname(host->mmc));
				sdhci_err_stats_inc(host, CTRL_TIMEOUT);
				sdhci_dumpregs(host);
				return;
			}
			udelay(10);
		}
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
		/*
		 * 3.4 ~ 3.6V are valid only for those platforms where it's
		 * known that the voltage range is supported by hardware.
		 */
		case MMC_VDD_34_35:
		case MMC_VDD_35_36:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
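
/*
 * Route power control through the vmmc regulator when one is available;
 * otherwise drive the SDHCI power register directly.
 */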
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*
 * Some controllers need to configure a valid bus voltage on their power
 * register regardless of whether an external regulator is taking care of power
 * supply. This helper function takes care of it if set as the controller's
 * sdhci_ops.set_power callback.
 */
void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
				     unsigned char mode,
				     unsigned short vdd)
{
	if (!IS_ERR(host->mmc->supply.vmmc)) {
		struct mmc_host *mmc = host->mmc;

		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	}
	sdhci_set_power_noreg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	bool present;

	/* First, check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	if (sdhci_present_error(host, mrq->cmd, present))
		goto out_finish;

	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;

	if (!sdhci_send_command_retry(host, cmd, flags))
		goto out_finish;

	spin_unlock_irqrestore(&host->lock, flags);

	return;

out_finish:
	sdhci_finish_mrq(host, mrq);
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);

int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);

	if (sdhci_present_error(host, mrq->cmd, true)) {
		sdhci_finish_mrq(host, mrq);
		goto out_finish;
	}

	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;

	/*
	 * The HSQ may send a command in interrupt context without polling
	 * the busy signaling, which means we should return -EBUSY if the
	 * controller has not released the inhibit bits, so that the HSQ can
	 * try to send the request again in non-atomic context. Hence we
	 * must not finish this request here.
	 */
	if (!sdhci_send_command(host, cmd))
		ret = -EBUSY;
	else
		sdhci_led_activate(host);

out_finish:
	spin_unlock_irqrestore(&host->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_request_atomic);
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
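
/*
 * Preset value helpers: SDHCI v3.00 preset registers supply driver strength
 * and clock settings per bus timing. The helpers below decide when those
 * presets apply and whether they still need to be (re)enabled.
 */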
static bool sdhci_timing_has_preset(unsigned char timing)
{
	switch (timing) {
	case MMC_TIMING_UHS_SDR12:
	case MMC_TIMING_UHS_SDR25:
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		return true;
	}
	return false;
}

static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
{
	return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
	       sdhci_timing_has_preset(timing);
}

static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
{
	/*
	 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
	 * Frequency. Check if preset values need to be enabled, or the Driver
	 * Strength needs updating. Note, clock changes are handled separately.
	 */
	return !host->preset_enabled &&
	       (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
}
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	bool reinit_uhs = host->reinit_uhs;
	bool turning_on_clk = false;
	u8 ctrl;

	host->reinit_uhs = false;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
	    (ios->power_mode == MMC_POWER_UP) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		turning_on_clk = ios->clock && !host->clock;

		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = mmc->actual_clock ?
						mmc->actual_clock / 1000 :
						host->clock / 1000;
			mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	/*
	 * Special case to avoid multiple clock changes during voltage
	 * switching.
	 */
	if (!reinit_uhs &&
	    turning_on_clk &&
	    host->timing == ios->timing &&
	    host->version >= SDHCI_SPEC_300 &&
	    !sdhci_presetable_values_change(host, ios))
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
			host->drv_type = ios->drv_type;
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (sdhci_preset_needed(host, ios->timing)) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
						  preset);
			host->drv_type = ios->drv_type;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

int sdhci_get_cd_nogpio(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
out:
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else if (mmc_can_gpio_ro(host->mmc))
		is_readonly = mmc_gpio_get_ro(host->mmc);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
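
/*
 * Hosts with SDHCI_QUIRK_UNSTABLE_RO_DETECT give unreliable write-protect
 * readings, so sample the switch several times, 30 ms apart, and report
 * read-only only on a majority of positive samples.
 */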
#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}
}

void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);
	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(mmc_dev(mmc));
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);

static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	sdhci_enable_sdio_irq_nolock(host, true);
	spin_unlock_irqrestore(&host->lock, flags);
}

int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret < 0) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret < 0) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret < 0) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
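
/*
 * Busy detection: DAT[0] is held low by the card while it is busy, so the
 * card is considered busy whenever the DAT[0] level bit reads back as 0.
 */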
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, the tuning command
	 * generates a Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when the tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready. But to make sure
	 * we don't hit a controller bug, we _only_ enable the Buffer
	 * Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_start_tuning);

void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_end_tuning);

void sdhci_reset_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_reset_tuning);

void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_reset_for(host, TUNING_ABORT);

	sdhci_end_tuning(host);

	mmc_send_abort_tuning(host->mmc, opcode);
}
EXPORT_SYMBOL_GPL(sdhci_abort_tuning);

/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. The
 * SDHCI tuning command does not have a data payload (or rather the hardware
 * does it automatically) so mmc_send_tuning() will return -EIO. Also the
 * tuning command interrupt setup is different to other commands and there is
 * no timeout interrupt so special handling is needed.
 */
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
	unsigned long flags;
	u32 b = host->sdma_boundary;

	spin_lock_irqsave(&host->lock, flags);

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here.
	 */
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	if (!sdhci_send_command_retry(host, &cmd, flags)) {
		spin_unlock_irqrestore(&host->lock, flags);
		host->tuning_done = 0;
		return;
	}

	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));
}
EXPORT_SYMBOL_GPL(sdhci_send_tuning);
static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int i;

	/*
	 * Issue opcode repeatedly until Execute Tuning is set to 0 or the
	 * number of loops reaches the tuning loop count.
	 */
	for (i = 0; i < host->tuning_loop_count; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

		if (!host->tuning_done) {
			pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
				 mmc_hostname(host->mmc));
			sdhci_abort_tuning(host, opcode);
			return -ETIMEDOUT;
		}

		/* Spec does not require a delay between tuning cycles */
		if (host->tuning_delay > 0)
			mdelay(host->tuning_delay);

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return 0; /* Success! */
			break;
		}
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
	return -EAGAIN;
}

int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		fallthrough;

	default:
		goto out;
	}

	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	mmc->retune_period = tuning_count;

	if (host->tuning_delay < 0)
		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

	sdhci_start_tuning(host);

	host->tuning_err = __sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * Only enable or disable Preset Value if it is not already
	 * enabled or disabled, respectively. Otherwise, bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			   int err)
{
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));

	data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	/*
	 * No pre-mapping in the pre hook if we're using the bounce buffer,
	 * for that we would need two bounce buffers since one buffer is
	 * in flight when this is getting called.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}

static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}

static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
		       mmc_hostname(mmc));
		pr_err("%s: Resetting controller.\n",
		       mmc_hostname(mmc));

		sdhci_reset_for(host, CARD_REMOVED);
		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request			= sdhci_request,
	.post_req			= sdhci_post_req,
	.pre_req			= sdhci_pre_req,
	.set_ios			= sdhci_set_ios,
	.get_cd				= sdhci_get_cd,
	.get_ro				= sdhci_get_ro,
	.card_hw_reset			= sdhci_hw_reset,
	.enable_sdio_irq		= sdhci_enable_sdio_irq,
	.ack_sdio_irq			= sdhci_ack_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy			= sdhci_card_busy,
};
  2555. /*****************************************************************************\
  2556. * *
  2557. * Request done *
  2558. * *
  2559. \*****************************************************************************/
static bool sdhci_request_done(struct sdhci_host *host)
{
        unsigned long flags;
        struct mmc_request *mrq;
        int i;

        spin_lock_irqsave(&host->lock, flags);

        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                mrq = host->mrqs_done[i];
                if (mrq)
                        break;
        }

        if (!mrq) {
                spin_unlock_irqrestore(&host->lock, flags);
                return true;
        }

        /*
         * The controller needs a reset of internal state machines
         * upon error conditions.
         */
        if (sdhci_needs_reset(host, mrq)) {
                /*
                 * Do not finish until command and data lines are available for
                 * reset. Note there can only be one other mrq, so it cannot
                 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
                 * would both be null.
                 */
                if (host->cmd || host->data_cmd) {
                        spin_unlock_irqrestore(&host->lock, flags);
                        return true;
                }

                /* Some controllers need this kick or reset won't work here */
                if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
                        /* This is to force an update */
                        host->ops->set_clock(host, host->clock);

                sdhci_reset_for(host, REQUEST_ERROR);

                host->pending_reset = false;
        }

        /*
         * Always unmap the data buffers if they were mapped by
         * sdhci_prepare_data() whenever we finish with a request.
         * This avoids leaking DMA mappings on error.
         */
        if (host->flags & SDHCI_REQ_USE_DMA) {
                struct mmc_data *data = mrq->data;

                if (host->use_external_dma && data &&
                    (mrq->cmd->error || data->error)) {
                        struct dma_chan *chan = sdhci_external_dma_channel(host, data);

                        host->mrqs_done[i] = NULL;
                        spin_unlock_irqrestore(&host->lock, flags);
                        dmaengine_terminate_sync(chan);
                        spin_lock_irqsave(&host->lock, flags);
                        sdhci_set_mrq_done(host, mrq);
                }

                if (data && data->host_cookie == COOKIE_MAPPED) {
                        if (host->bounce_buffer) {
                                /*
                                 * On reads, copy the bounced data into the
                                 * sglist
                                 */
                                if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
                                        unsigned int length = data->bytes_xfered;

                                        if (length > host->bounce_buffer_size) {
                                                pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
                                                       mmc_hostname(host->mmc),
                                                       host->bounce_buffer_size,
                                                       data->bytes_xfered);
                                                /* Cap it down and continue */
                                                length = host->bounce_buffer_size;
                                        }
                                        dma_sync_single_for_cpu(
                                                mmc_dev(host->mmc),
                                                host->bounce_addr,
                                                host->bounce_buffer_size,
                                                DMA_FROM_DEVICE);
                                        sg_copy_from_buffer(data->sg,
                                                data->sg_len,
                                                host->bounce_buffer,
                                                length);
                                } else {
                                        /* No copying, just switch ownership */
                                        dma_sync_single_for_cpu(
                                                mmc_dev(host->mmc),
                                                host->bounce_addr,
                                                host->bounce_buffer_size,
                                                mmc_get_dma_dir(data));
                                }
                        } else {
                                /* Unmap the raw data */
                                dma_unmap_sg(mmc_dev(host->mmc), data->sg,
                                             data->sg_len,
                                             mmc_get_dma_dir(data));
                        }
                        data->host_cookie = COOKIE_UNMAPPED;
                }
        }

        host->mrqs_done[i] = NULL;

        spin_unlock_irqrestore(&host->lock, flags);

        if (host->ops->request_done)
                host->ops->request_done(host, mrq);
        else
                mmc_request_done(host->mmc, mrq);

        return false;
}
static void sdhci_complete_work(struct work_struct *work)
{
        struct sdhci_host *host = container_of(work, struct sdhci_host,
                                               complete_work);

        while (!sdhci_request_done(host))
                ;
}
static void sdhci_timeout_timer(struct timer_list *t)
{
        struct sdhci_host *host;
        unsigned long flags;

        host = from_timer(host, t, timer);

        spin_lock_irqsave(&host->lock, flags);

        if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
                pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
                       mmc_hostname(host->mmc));
                sdhci_err_stats_inc(host, REQ_TIMEOUT);
                sdhci_dumpregs(host);

                host->cmd->error = -ETIMEDOUT;
                sdhci_finish_mrq(host, host->cmd->mrq);
        }

        spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_timeout_data_timer(struct timer_list *t)
{
        struct sdhci_host *host;
        unsigned long flags;

        host = from_timer(host, t, data_timer);

        spin_lock_irqsave(&host->lock, flags);

        if (host->data || host->data_cmd ||
            (host->cmd && sdhci_data_line_cmd(host->cmd))) {
                pr_err("%s: Timeout waiting for hardware interrupt.\n",
                       mmc_hostname(host->mmc));
                sdhci_err_stats_inc(host, REQ_TIMEOUT);
                sdhci_dumpregs(host);

                if (host->data) {
                        host->data->error = -ETIMEDOUT;
                        __sdhci_finish_data(host, true);
                        queue_work(host->complete_wq, &host->complete_work);
                } else if (host->data_cmd) {
                        host->data_cmd->error = -ETIMEDOUT;
                        sdhci_finish_mrq(host, host->data_cmd->mrq);
                } else {
                        host->cmd->error = -ETIMEDOUT;
                        sdhci_finish_mrq(host, host->cmd->mrq);
                }
        }

        spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 *                            Interrupt handling                             *
 *                                                                           *
\*****************************************************************************/
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
{
        /* Handle auto-CMD12 error */
        if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
                struct mmc_request *mrq = host->data_cmd->mrq;
                u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
                int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
                                   SDHCI_INT_DATA_TIMEOUT :
                                   SDHCI_INT_DATA_CRC;

                /* Treat auto-CMD12 error the same as data error */
                if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
                        *intmask_p |= data_err_bit;
                        return;
                }
        }

        if (!host->cmd) {
                /*
                 * SDHCI recovers from errors by resetting the cmd and data
                 * circuits. Until that is done, there very well might be more
                 * interrupts, so ignore them in that case.
                 */
                if (host->pending_reset)
                        return;
                pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
                       mmc_hostname(host->mmc), (unsigned)intmask);
                sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
                sdhci_dumpregs(host);
                return;
        }

        if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
                       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
                if (intmask & SDHCI_INT_TIMEOUT) {
                        host->cmd->error = -ETIMEDOUT;
                        sdhci_err_stats_inc(host, CMD_TIMEOUT);
                } else {
                        host->cmd->error = -EILSEQ;
                        if (!mmc_op_tuning(host->cmd->opcode))
                                sdhci_err_stats_inc(host, CMD_CRC);
                }
                /* Treat data command CRC error the same as data CRC error */
                if (host->cmd->data &&
                    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
                     SDHCI_INT_CRC) {
                        host->cmd = NULL;
                        *intmask_p |= SDHCI_INT_DATA_CRC;
                        return;
                }

                __sdhci_finish_mrq(host, host->cmd->mrq);
                return;
        }

        /* Handle auto-CMD23 error */
        if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
                struct mmc_request *mrq = host->cmd->mrq;
                u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
                int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
                          -ETIMEDOUT :
                          -EILSEQ;

                sdhci_err_stats_inc(host, AUTO_CMD);

                if (sdhci_auto_cmd23(host, mrq)) {
                        mrq->sbc->error = err;
                        __sdhci_finish_mrq(host, mrq);
                        return;
                }
        }

        if (intmask & SDHCI_INT_RESPONSE)
                sdhci_finish_command(host);
}
static void sdhci_adma_show_error(struct sdhci_host *host)
{
        void *desc = host->adma_table;
        dma_addr_t dma = host->adma_addr;

        sdhci_dumpregs(host);

        while (true) {
                struct sdhci_adma2_64_desc *dma_desc = desc;

                if (host->flags & SDHCI_USE_64_BIT_DMA)
                        SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
                                   (unsigned long long)dma,
                                   le32_to_cpu(dma_desc->addr_hi),
                                   le32_to_cpu(dma_desc->addr_lo),
                                   le16_to_cpu(dma_desc->len),
                                   le16_to_cpu(dma_desc->cmd));
                else
                        SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
                                   (unsigned long long)dma,
                                   le32_to_cpu(dma_desc->addr_lo),
                                   le16_to_cpu(dma_desc->len),
                                   le16_to_cpu(dma_desc->cmd));

                desc += host->desc_sz;
                dma += host->desc_sz;

                if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
                        break;
        }
}
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
        u32 command;

        /*
         * CMD19 generates _only_ a Buffer Read Ready interrupt when
         * sdhci_send_tuning() is used, which is the case handled here.
         * The PIO-mode mmc_send_tuning() case must be excluded (host->data
         * is set then): if it were handled here too, sdhci_transfer_pio()
         * would never be called, SDHCI_INT_DATA_AVAIL would stay asserted,
         * and we would be stuck in an interrupt storm.
         */
        if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
                command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
                if (command == MMC_SEND_TUNING_BLOCK ||
                    command == MMC_SEND_TUNING_BLOCK_HS200) {
                        host->tuning_done = 1;
                        wake_up(&host->buf_ready_int);
                        return;
                }
        }

        if (!host->data) {
                struct mmc_command *data_cmd = host->data_cmd;

                /*
                 * The "data complete" interrupt is also used to
                 * indicate that a busy state has ended. See comment
                 * above in sdhci_cmd_irq().
                 */
                if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
                        if (intmask & SDHCI_INT_DATA_TIMEOUT) {
                                host->data_cmd = NULL;
                                data_cmd->error = -ETIMEDOUT;
                                sdhci_err_stats_inc(host, CMD_TIMEOUT);
                                __sdhci_finish_mrq(host, data_cmd->mrq);
                                return;
                        }
                        if (intmask & SDHCI_INT_DATA_END) {
                                host->data_cmd = NULL;
                                /*
                                 * Some cards handle busy-end interrupt
                                 * before the command completed, so make
                                 * sure we do things in the proper order.
                                 */
                                if (host->cmd == data_cmd)
                                        return;

                                __sdhci_finish_mrq(host, data_cmd->mrq);
                                return;
                        }
                }

                /*
                 * SDHCI recovers from errors by resetting the cmd and data
                 * circuits. Until that is done, there very well might be more
                 * interrupts, so ignore them in that case.
                 */
                if (host->pending_reset)
                        return;

                pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
                       mmc_hostname(host->mmc), (unsigned)intmask);
                sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
                sdhci_dumpregs(host);

                return;
        }

        if (intmask & SDHCI_INT_DATA_TIMEOUT) {
                host->data->error = -ETIMEDOUT;
                sdhci_err_stats_inc(host, DAT_TIMEOUT);
        } else if (intmask & SDHCI_INT_DATA_END_BIT) {
                host->data->error = -EILSEQ;
                if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
                        sdhci_err_stats_inc(host, DAT_CRC);
        } else if ((intmask & SDHCI_INT_DATA_CRC) &&
                   SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
                   != MMC_BUS_TEST_R) {
                host->data->error = -EILSEQ;
                if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
                        sdhci_err_stats_inc(host, DAT_CRC);
        } else if (intmask & SDHCI_INT_ADMA_ERROR) {
                pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
                       intmask);
                sdhci_adma_show_error(host);
                sdhci_err_stats_inc(host, ADMA);
                host->data->error = -EIO;
                if (host->ops->adma_workaround)
                        host->ops->adma_workaround(host, intmask);
        }

        if (host->data->error)
                sdhci_finish_data(host);
        else {
                if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
                        sdhci_transfer_pio(host);

                /*
                 * We currently don't do anything fancy with DMA
                 * boundaries, but as we can't disable the feature
                 * we need to at least restart the transfer.
                 *
                 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
                 * should return a valid address to continue from, but as
                 * some controllers are faulty, don't trust them.
                 */
                if (intmask & SDHCI_INT_DMA_END) {
                        dma_addr_t dmastart, dmanow;

                        dmastart = sdhci_sdma_address(host);
                        dmanow = dmastart + host->data->bytes_xfered;
                        /*
                         * Force update to the next DMA block boundary.
                         */
                        dmanow = (dmanow &
                                ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
                                SDHCI_DEFAULT_BOUNDARY_SIZE;
                        host->data->bytes_xfered = dmanow - dmastart;
                        DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
                            &dmastart, host->data->bytes_xfered, &dmanow);
                        sdhci_set_sdma_addr(host, dmanow);
                }
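
                /*
                 * Illustrative example (editorial addition, not in the
                 * original source): with the default 512 KiB boundary,
                 * dmastart = 0x10000000 and bytes_xfered = 0x2000 give
                 * dmanow = 0x10002000, which is rounded up to the next
                 * boundary, 0x10080000; bytes_xfered then becomes 0x80000
                 * and the transfer is restarted from there.
                 */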
                if (intmask & SDHCI_INT_DATA_END) {
                        if (host->cmd == host->data_cmd) {
                                /*
                                 * Data managed to finish before the
                                 * command completed. Make sure we do
                                 * things in the proper order.
                                 */
                                host->data_early = 1;
                        } else {
                                sdhci_finish_data(host);
                        }
                }
        }
}
static inline bool sdhci_defer_done(struct sdhci_host *host,
                                    struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        return host->pending_reset || host->always_defer_done ||
               ((host->flags & SDHCI_REQ_USE_DMA) && data &&
                data->host_cookie == COOKIE_MAPPED);
}
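
/*
 * Editorial note (not in the original source): the apparent intent of the
 * condition above is that completion is deferred to the threaded handler
 * when DMA unmapping, bounce-buffer copying or post-error reset handling
 * would otherwise have to run in hard-IRQ context. This reading is an
 * assumption, not a statement taken from the source.
 */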
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
        struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
        irqreturn_t result = IRQ_NONE;
        struct sdhci_host *host = dev_id;
        u32 intmask, mask, unexpected = 0;
        int max_loops = 16;
        int i;

        spin_lock(&host->lock);

        if (host->runtime_suspended) {
                spin_unlock(&host->lock);
                return IRQ_NONE;
        }

        intmask = sdhci_readl(host, SDHCI_INT_STATUS);
        if (!intmask || intmask == 0xffffffff) {
                result = IRQ_NONE;
                goto out;
        }

        do {
                DBG("IRQ status 0x%08x\n", intmask);

                if (host->ops->irq) {
                        intmask = host->ops->irq(host, intmask);
                        if (!intmask)
                                goto cont;
                }

                /* Clear selected interrupts. */
                mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
                                  SDHCI_INT_BUS_POWER);
                sdhci_writel(host, mask, SDHCI_INT_STATUS);

                if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
                        u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                                      SDHCI_CARD_PRESENT;

                        /*
                         * There is an observation on i.MX eSDHC: the INSERT
                         * bit will be immediately set again when it gets
                         * cleared, if a card is inserted. We have to mask
                         * the irq to prevent an interrupt storm which would
                         * freeze the system. The REMOVE bit behaves the same
                         * way.
                         *
                         * More testing is needed here to ensure this works
                         * for other platforms though.
                         */
                        host->ier &= ~(SDHCI_INT_CARD_INSERT |
                                       SDHCI_INT_CARD_REMOVE);
                        host->ier |= present ? SDHCI_INT_CARD_REMOVE :
                                               SDHCI_INT_CARD_INSERT;
                        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
                        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

                        sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
                                     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

                        host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
                                                       SDHCI_INT_CARD_REMOVE);
                        result = IRQ_WAKE_THREAD;
                }

                if (intmask & SDHCI_INT_CMD_MASK)
                        sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);

                if (intmask & SDHCI_INT_DATA_MASK)
                        sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

                if (intmask & SDHCI_INT_BUS_POWER)
                        pr_err("%s: Card is consuming too much power!\n",
                               mmc_hostname(host->mmc));

                if (intmask & SDHCI_INT_RETUNE)
                        mmc_retune_needed(host->mmc);

                if ((intmask & SDHCI_INT_CARD_INT) &&
                    (host->ier & SDHCI_INT_CARD_INT)) {
                        sdhci_enable_sdio_irq_nolock(host, false);
                        sdio_signal_irq(host->mmc);
                }

                intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
                             SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
                             SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
                             SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

                if (intmask) {
                        unexpected |= intmask;
                        sdhci_writel(host, intmask, SDHCI_INT_STATUS);
                }
cont:
                if (result == IRQ_NONE)
                        result = IRQ_HANDLED;

                intmask = sdhci_readl(host, SDHCI_INT_STATUS);
        } while (intmask && --max_loops);

        /* Determine if mrqs can be completed immediately */
        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                struct mmc_request *mrq = host->mrqs_done[i];

                if (!mrq)
                        continue;

                if (sdhci_defer_done(host, mrq)) {
                        result = IRQ_WAKE_THREAD;
                } else {
                        mrqs_done[i] = mrq;
                        host->mrqs_done[i] = NULL;
                }
        }
out:
        if (host->deferred_cmd)
                result = IRQ_WAKE_THREAD;

        spin_unlock(&host->lock);

        /* Process mrqs ready for immediate completion */
        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (!mrqs_done[i])
                        continue;

                if (host->ops->request_done)
                        host->ops->request_done(host, mrqs_done[i]);
                else
                        mmc_request_done(host->mmc, mrqs_done[i]);
        }

        if (unexpected) {
                pr_err("%s: Unexpected interrupt 0x%08x.\n",
                       mmc_hostname(host->mmc), unexpected);
                sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
                sdhci_dumpregs(host);
        }

        return result;
}
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
        struct sdhci_host *host = dev_id;
        struct mmc_command *cmd;
        unsigned long flags;
        u32 isr;

        while (!sdhci_request_done(host))
                ;

        spin_lock_irqsave(&host->lock, flags);

        isr = host->thread_isr;
        host->thread_isr = 0;

        cmd = host->deferred_cmd;
        if (cmd && !sdhci_send_command_retry(host, cmd, flags))
                sdhci_finish_mrq(host, cmd->mrq);

        spin_unlock_irqrestore(&host->lock, flags);

        if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
                struct mmc_host *mmc = host->mmc;

                mmc->ops->card_event(mmc);
                mmc_detect_change(mmc, msecs_to_jiffies(200));
        }

        return IRQ_HANDLED;
}
/*****************************************************************************\
 *                                                                           *
 *                             Suspend/resume                                *
 *                                                                           *
\*****************************************************************************/
#ifdef CONFIG_PM

static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
{
        return mmc_card_is_removable(host->mmc) &&
               !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
               !mmc_can_gpio_cd(host->mmc);
}

/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * There is no need to restore the SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
        u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
                  SDHCI_WAKE_ON_INT;
        u32 irq_val = 0;
        u8 wake_val = 0;
        u8 val;

        if (sdhci_cd_irq_can_wakeup(host)) {
                wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
                irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
        }

        if (mmc_card_wake_sdio_irq(host->mmc)) {
                wake_val |= SDHCI_WAKE_ON_INT;
                irq_val |= SDHCI_INT_CARD_INT;
        }

        if (!irq_val)
                return false;

        val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
        val &= ~mask;
        val |= wake_val;
        sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

        sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);

        host->irq_wake_enabled = !enable_irq_wake(host->irq);

        return host->irq_wake_enabled;
}

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
        u8 val;
        u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
                | SDHCI_WAKE_ON_INT;

        val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
        val &= ~mask;
        sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

        disable_irq_wake(host->irq);

        host->irq_wake_enabled = false;
}
int sdhci_suspend_host(struct sdhci_host *host)
{
        sdhci_disable_card_detection(host);

        mmc_retune_timer_stop(host->mmc);

        if (!device_may_wakeup(mmc_dev(host->mmc)) ||
            !sdhci_enable_irq_wakeups(host)) {
                host->ier = 0;
                sdhci_writel(host, 0, SDHCI_INT_ENABLE);
                sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
                free_irq(host->irq, host);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        int ret = 0;

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->enable_dma)
                        host->ops->enable_dma(host);
        }

        if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
            (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
                /* Card keeps power but host controller does not */
                sdhci_init(host, 0);
                host->pwr = 0;
                host->clock = 0;
                host->reinit_uhs = true;
                mmc->ops->set_ios(mmc, &mmc->ios);
        } else {
                sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
        }

        if (host->irq_wake_enabled) {
                sdhci_disable_irq_wakeups(host);
        } else {
                ret = request_threaded_irq(host->irq, sdhci_irq,
                                           sdhci_thread_irq, IRQF_SHARED,
                                           mmc_hostname(mmc), host);
                if (ret)
                        return ret;
        }

        sdhci_enable_card_detection(host);

        return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
        unsigned long flags;

        mmc_retune_timer_stop(host->mmc);

        spin_lock_irqsave(&host->lock, flags);
        host->ier &= SDHCI_INT_CARD_INT;
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
        spin_unlock_irqrestore(&host->lock, flags);

        synchronize_hardirq(host->irq);

        spin_lock_irqsave(&host->lock, flags);
        host->runtime_suspended = true;
        spin_unlock_irqrestore(&host->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
{
        struct mmc_host *mmc = host->mmc;
        unsigned long flags;
        int host_flags = host->flags;

        if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->enable_dma)
                        host->ops->enable_dma(host);
        }

        sdhci_init(host, soft_reset);

        if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
            mmc->ios.power_mode != MMC_POWER_OFF) {
                /* Force clock and power re-program */
                host->pwr = 0;
                host->clock = 0;
                host->reinit_uhs = true;
                mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
                mmc->ops->set_ios(mmc, &mmc->ios);

                if ((host_flags & SDHCI_PV_ENABLED) &&
                    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
                        spin_lock_irqsave(&host->lock, flags);
                        sdhci_enable_preset_value(host, true);
                        spin_unlock_irqrestore(&host->lock, flags);
                }

                if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
                    mmc->ops->hs400_enhanced_strobe)
                        mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
        }

        spin_lock_irqsave(&host->lock, flags);

        host->runtime_suspended = false;

        /* Enable SDIO IRQ */
        if (sdio_irq_claimed(mmc))
                sdhci_enable_sdio_irq_nolock(host, true);

        /* Enable Card Detection */
        sdhci_enable_card_detection(host);

        spin_unlock_irqrestore(&host->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 *                  Command Queue Engine (CQE) helpers                       *
 *                                                                           *
\*****************************************************************************/
void sdhci_cqe_enable(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
        u8 ctrl;

        spin_lock_irqsave(&host->lock, flags);

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl &= ~SDHCI_CTRL_DMA_MASK;
        /*
         * Hosts from v4.10 onwards support the ADMA3 DMA type. ADMA3 uses
         * integrated descriptors covering both command and transfer
         * descriptors, which is better suited to command queuing.
         */
        if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
                ctrl |= SDHCI_CTRL_ADMA3;
        else if (host->flags & SDHCI_USE_64_BIT_DMA)
                ctrl |= SDHCI_CTRL_ADMA64;
        else
                ctrl |= SDHCI_CTRL_ADMA32;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

        sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
                     SDHCI_BLOCK_SIZE);

        /* Set maximum timeout */
        sdhci_set_timeout(host, NULL);

        host->ier = host->cqe_ier;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

        host->cqe_on = true;

        pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
                 mmc_hostname(mmc), host->ier,
                 sdhci_readl(host, SDHCI_INT_STATUS));

        spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        sdhci_set_default_irqs(host);

        host->cqe_on = false;

        if (recovery)
                sdhci_reset_for(host, CQE_RECOVERY);

        pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
                 mmc_hostname(mmc), host->ier,
                 sdhci_readl(host, SDHCI_INT_STATUS));

        spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
                   int *data_error)
{
        u32 mask;

        if (!host->cqe_on)
                return false;

        if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
                *cmd_error = -EILSEQ;
                if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
                        sdhci_err_stats_inc(host, CMD_CRC);
        } else if (intmask & SDHCI_INT_TIMEOUT) {
                *cmd_error = -ETIMEDOUT;
                sdhci_err_stats_inc(host, CMD_TIMEOUT);
        } else
                *cmd_error = 0;

        if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
                *data_error = -EILSEQ;
                if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
                        sdhci_err_stats_inc(host, DAT_CRC);
        } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
                *data_error = -ETIMEDOUT;
                sdhci_err_stats_inc(host, DAT_TIMEOUT);
        } else if (intmask & SDHCI_INT_ADMA_ERROR) {
                *data_error = -EIO;
                sdhci_err_stats_inc(host, ADMA);
        } else
                *data_error = 0;

        /* Clear selected interrupts. */
        mask = intmask & host->cqe_ier;
        sdhci_writel(host, mask, SDHCI_INT_STATUS);

        if (intmask & SDHCI_INT_BUS_POWER)
                pr_err("%s: Card is consuming too much power!\n",
                       mmc_hostname(host->mmc));

        intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
        if (intmask) {
                sdhci_writel(host, intmask, SDHCI_INT_STATUS);
                pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
                       mmc_hostname(host->mmc), intmask);
                sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
                sdhci_dumpregs(host);
        }

        return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
/*****************************************************************************\
 *                                                                           *
 *                       Device allocation/registration                      *
 *                                                                           *
\*****************************************************************************/
struct sdhci_host *sdhci_alloc_host(struct device *dev,
                                    size_t priv_size)
{
        struct mmc_host *mmc;
        struct sdhci_host *host;

        WARN_ON(dev == NULL);

        mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
        if (!mmc)
                return ERR_PTR(-ENOMEM);

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->mmc_host_ops = sdhci_ops;
        mmc->ops = &host->mmc_host_ops;

        host->flags = SDHCI_SIGNALING_330;

        host->cqe_ier     = SDHCI_CQE_INT_MASK;
        host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

        host->tuning_delay = -1;
        host->tuning_loop_count = MAX_TUNING_LOOP;

        host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;

        /*
         * The DMA table descriptor count is calculated as the maximum
         * number of segments times 2, to allow for an alignment
         * descriptor for each segment, plus 1 for a nop end descriptor.
         */
        host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
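        /*
         * Illustrative sizing (editorial addition, not in the original
         * source): assuming the usual SDHCI_MAX_SEGS of 128, this reserves
         * 128 * 2 + 1 = 257 descriptors.
         */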
        host->max_adma = 65536;

        host->max_timeout_count = 0xE;

        return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        struct device *dev = mmc_dev(mmc);
        int ret = -EINVAL;

        if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
                host->flags &= ~SDHCI_USE_64_BIT_DMA;

        /* Try 64-bit mask if hardware is capable of it */
        if (host->flags & SDHCI_USE_64_BIT_DMA) {
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
                if (ret) {
                        pr_warn("%s: Failed to set 64-bit DMA mask.\n",
                                mmc_hostname(mmc));
                        host->flags &= ~SDHCI_USE_64_BIT_DMA;
                }
        }

        /* 32-bit mask as default & fallback */
        if (ret) {
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
                if (ret)
                        pr_warn("%s: Failed to set 32-bit DMA mask.\n",
                                mmc_hostname(mmc));
        }

        return ret;
}
void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
                       const u32 *caps, const u32 *caps1)
{
        u16 v;
        u64 dt_caps_mask = 0;
        u64 dt_caps = 0;

        if (host->read_caps)
                return;

        host->read_caps = true;

        if (debug_quirks)
                host->quirks = debug_quirks;

        if (debug_quirks2)
                host->quirks2 = debug_quirks2;

        sdhci_reset_for_all(host);

        if (host->v4_mode)
                sdhci_do_enable_v4_mode(host);

        device_property_read_u64(mmc_dev(host->mmc),
                                 "sdhci-caps-mask", &dt_caps_mask);
        device_property_read_u64(mmc_dev(host->mmc),
                                 "sdhci-caps", &dt_caps);

        v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
        host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

        if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
                return;

        if (caps) {
                host->caps = *caps;
        } else {
                host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
                host->caps &= ~lower_32_bits(dt_caps_mask);
                host->caps |= lower_32_bits(dt_caps);
        }

        if (host->version < SDHCI_SPEC_300)
                return;

        if (caps1) {
                host->caps1 = *caps1;
        } else {
                host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
                host->caps1 &= ~upper_32_bits(dt_caps_mask);
                host->caps1 |= upper_32_bits(dt_caps);
        }
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        unsigned int max_blocks;
        unsigned int bounce_size;
        int ret;

        /*
         * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
         * has diminishing returns, this is probably because SD/MMC
         * cards are usually optimized to handle this size of requests.
         */
        bounce_size = SZ_64K;
        /*
         * Adjust downwards to maximum request size if this is less
         * than our segment size, else hammer down the maximum
         * request size to the maximum buffer size.
         */
        if (mmc->max_req_size < bounce_size)
                bounce_size = mmc->max_req_size;
        max_blocks = bounce_size / 512;
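        /*
         * Illustrative example (editorial addition, not in the original
         * source): with the default 64 KiB bounce buffer, max_blocks =
         * 65536 / 512 = 128, i.e. up to 128 512-byte blocks can be grouped
         * into a single request.
         */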
        /*
         * When we support just one segment, we can get significant
         * speedups with the help of a bounce buffer to group scattered
         * reads/writes together.
         */
        host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
                                           bounce_size,
                                           GFP_KERNEL);
        if (!host->bounce_buffer) {
                pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
                       mmc_hostname(mmc),
                       bounce_size);
                /*
                 * Exiting with zero here makes sure we proceed with
                 * mmc->max_segs == 1.
                 */
                return;
        }

        host->bounce_addr = dma_map_single(mmc_dev(mmc),
                                           host->bounce_buffer,
                                           bounce_size,
                                           DMA_BIDIRECTIONAL);
        ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
        if (ret) {
                devm_kfree(mmc_dev(mmc), host->bounce_buffer);
                host->bounce_buffer = NULL;
                /* Again fall back to max_segs == 1 */
                return;
        }

        host->bounce_buffer_size = bounce_size;

        /* Lie about this since we're bouncing */
        mmc->max_segs = max_blocks;
        mmc->max_seg_size = bounce_size;
        mmc->max_req_size = bounce_size;

        pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
                mmc_hostname(mmc), max_blocks, bounce_size);
}
static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
{
        /*
         * According to the SD Host Controller spec v4.10, bit[27] of the
         * Capabilities Register (added in version 4.10) indicates 64-bit
         * System Address support for V4 mode.
         */
        if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
                return host->caps & SDHCI_CAN_64BIT_V4;

        return host->caps & SDHCI_CAN_64BIT;
}
int sdhci_setup_host(struct sdhci_host *host)
{
        struct mmc_host *mmc;
        u32 max_current_caps;
        unsigned int ocr_avail;
        unsigned int override_timeout_clk;
        u32 max_clk;
        int ret = 0;
        bool enable_vqmmc = false;

        WARN_ON(host == NULL);
        if (host == NULL)
                return -EINVAL;

        mmc = host->mmc;

        /*
         * If there are external regulators, get them. Note this must be done
         * early before resetting the host and reading the capabilities so
         * that the host can take the appropriate action if regulators are
         * not available.
         */
        if (!mmc->supply.vqmmc) {
                ret = mmc_regulator_get_supply(mmc);
                if (ret)
                        return ret;
                enable_vqmmc = true;
        }

        DBG("Version:   0x%08x | Present:  0x%08x\n",
            sdhci_readw(host, SDHCI_HOST_VERSION),
            sdhci_readl(host, SDHCI_PRESENT_STATE));
        DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
            sdhci_readl(host, SDHCI_CAPABILITIES),
            sdhci_readl(host, SDHCI_CAPABILITIES_1));

        sdhci_read_caps(host);

        override_timeout_clk = host->timeout_clk;

        if (host->version > SDHCI_SPEC_420) {
                pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
                       mmc_hostname(mmc), host->version);
        }

        if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
                host->flags |= SDHCI_USE_SDMA;
        else if (!(host->caps & SDHCI_CAN_DO_SDMA))
                DBG("Controller doesn't have SDMA capability\n");
        else
                host->flags |= SDHCI_USE_SDMA;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
            (host->flags & SDHCI_USE_SDMA)) {
                DBG("Disabling DMA as it is marked broken\n");
                host->flags &= ~SDHCI_USE_SDMA;
        }

        if ((host->version >= SDHCI_SPEC_200) &&
            (host->caps & SDHCI_CAN_DO_ADMA2))
                host->flags |= SDHCI_USE_ADMA;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
            (host->flags & SDHCI_USE_ADMA)) {
                DBG("Disabling ADMA as it is marked broken\n");
                host->flags &= ~SDHCI_USE_ADMA;
        }

        if (sdhci_can_64bit_dma(host))
                host->flags |= SDHCI_USE_64_BIT_DMA;

        if (host->use_external_dma) {
                ret = sdhci_external_dma_init(host);
                if (ret == -EPROBE_DEFER)
                        goto unreg;
                /*
                 * Fall back to use the DMA/PIO integrated in standard SDHCI
                 * instead of external DMA devices.
                 */
                else if (ret)
                        sdhci_switch_external_dma(host, false);
                /* Disable internal DMA sources */
                else
                        host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
        }

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->set_dma_mask)
                        ret = host->ops->set_dma_mask(host);
                else
                        ret = sdhci_set_dma_mask(host);

                if (!ret && host->ops->enable_dma)
                        ret = host->ops->enable_dma(host);

                if (ret) {
                        pr_warn("%s: No suitable DMA available - falling back to PIO\n",
                                mmc_hostname(mmc));
                        host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

                        ret = 0;
                }
        }

        /* SDMA does not support 64-bit DMA if v4 mode is not set */
        if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
                host->flags &= ~SDHCI_USE_SDMA;

        if (host->flags & SDHCI_USE_ADMA) {
                dma_addr_t dma;
                void *buf;

                if (!(host->flags & SDHCI_USE_64_BIT_DMA))
                        host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
                else if (!host->alloc_desc_sz)
                        host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);

                host->desc_sz = host->alloc_desc_sz;

                host->adma_table_sz = host->adma_table_cnt * host->desc_sz;

                host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
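                /*
                 * Illustrative sizing (editorial addition, not in the
                 * original source): assuming SDHCI_MAX_SEGS = 128 and
                 * 16-byte 64-bit ADMA2 descriptors, adma_table_sz =
                 * 257 * 16 = 4112 bytes and align_buffer_sz =
                 * 128 * 4 = 512 bytes.
                 */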
                /*
                 * Use zalloc to zero the reserved high 32-bits of 128-bit
                 * descriptors so that they never need to be written.
                 */
                buf = dma_alloc_coherent(mmc_dev(mmc),
                                         host->align_buffer_sz + host->adma_table_sz,
                                         &dma, GFP_KERNEL);
                if (!buf) {
                        pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
                                mmc_hostname(mmc));
                        host->flags &= ~SDHCI_USE_ADMA;
                } else if ((dma + host->align_buffer_sz) &
                           (SDHCI_ADMA2_DESC_ALIGN - 1)) {
                        pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
                                mmc_hostname(mmc));
                        host->flags &= ~SDHCI_USE_ADMA;
                        dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                          host->adma_table_sz, buf, dma);
                } else {
                        host->align_buffer = buf;
                        host->align_addr = dma;

                        host->adma_table = buf + host->align_buffer_sz;
                        host->adma_addr = dma + host->align_buffer_sz;
                }
        }

        /*
         * If we use DMA, then it's up to the caller to set the DMA
         * mask, but PIO does not need the hw shim so we set a new
         * mask here in that case.
         */
        if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
                host->dma_mask = DMA_BIT_MASK(64);
                mmc_dev(mmc)->dma_mask = &host->dma_mask;
        }

        if (host->version >= SDHCI_SPEC_300)
                host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
        else
                host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);

        host->max_clk *= 1000000;
        if (host->max_clk == 0 || host->quirks &
                        SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
                if (!host->ops->get_max_clock) {
                        pr_err("%s: Hardware doesn't specify base clock frequency.\n",
                               mmc_hostname(mmc));
                        ret = -ENODEV;
                        goto undma;
                }
                host->max_clk = host->ops->get_max_clock(host);
        }

        /*
         * In case of Host Controller v3.00, find out whether clock
         * multiplier is supported.
         */
        host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);

        /*
         * In case the value in Clock Multiplier is 0, then programmable
         * clock mode is not supported, otherwise the actual clock
         * multiplier is one more than the value of Clock Multiplier
         * in the Capabilities Register.
         */
        if (host->clk_mul)
                host->clk_mul += 1;
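        /*
         * Illustrative example (editorial addition, not in the original
         * source): a Clock Multiplier field of 7 in the Capabilities
         * Register means an actual multiplier of 7 + 1 = 8 for
         * Programmable Clock Mode.
         */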
        /*
         * Set host parameters.
         */
        max_clk = host->max_clk;

        if (host->ops->get_min_clock)
                mmc->f_min = host->ops->get_min_clock(host);
        else if (host->version >= SDHCI_SPEC_300) {
                if (host->clk_mul)
                        max_clk = host->max_clk * host->clk_mul;
                /*
                 * Divided Clock Mode minimum clock rate is always less than
                 * Programmable Clock Mode minimum clock rate.
                 */
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
        } else
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

        if (!mmc->f_max || mmc->f_max > max_clk)
                mmc->f_max = max_clk;
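        /*
         * Illustrative example (editorial addition, not in the original
         * source): with a 200 MHz base clock on a v3.00 host,
         * f_min = 200000000 / 2046 (the usual SDHCI_MAX_DIV_SPEC_300)
         * ~= 97.75 kHz.
         */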
        if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
                host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);

                if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
                        host->timeout_clk *= 1000;

                if (host->timeout_clk == 0) {
                        if (!host->ops->get_timeout_clock) {
                                pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
                                       mmc_hostname(mmc));
                                ret = -ENODEV;
                                goto undma;
                        }

                        host->timeout_clk =
                                DIV_ROUND_UP(host->ops->get_timeout_clock(host),
                                             1000);
                }

                if (override_timeout_clk)
                        host->timeout_clk = override_timeout_clk;

                mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
                        host->ops->get_max_timeout_count(host) : 1 << 27;
                mmc->max_busy_timeout /= host->timeout_clk;
        }
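        /*
         * Illustrative example (editorial addition, not in the original
         * source): timeout_clk is kept in kHz here, so a 48 MHz timeout
         * clock (48000 kHz) with the default maximum count of 1 << 27
         * yields max_busy_timeout ~= 134217728 / 48000 ~= 2796 ms.
         */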
        if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
            !host->ops->get_max_timeout_count)
                mmc->max_busy_timeout = 0;

        mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
        mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

        if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
                host->flags |= SDHCI_AUTO_CMD12;

        /*
         * For v3 mode, Auto-CMD23 only works in ADMA or PIO.
         * For v4 mode, SDMA may use Auto-CMD23 as well.
         */
        if ((host->version >= SDHCI_SPEC_300) &&
            ((host->flags & SDHCI_USE_ADMA) ||
             !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
            !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
                host->flags |= SDHCI_AUTO_CMD23;
                DBG("Auto-CMD23 available\n");
        } else {
                DBG("Auto-CMD23 unavailable\n");
        }
        /*
         * A controller may support 8-bit width, but the board itself
         * might not have the pins brought out. Boards that support
         * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
         * their platform code before calling sdhci_add_host(), and we
         * won't assume 8-bit width for hosts without that CAP.
         */
        if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
                mmc->caps |= MMC_CAP_4_BIT_DATA;

        if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
                mmc->caps &= ~MMC_CAP_CMD23;

        if (host->caps & SDHCI_CAN_DO_HISPD)
                mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
            mmc_card_is_removable(mmc) &&
            mmc_gpio_get_cd(mmc) < 0)
                mmc->caps |= MMC_CAP_NEEDS_POLL;

        if (!IS_ERR(mmc->supply.vqmmc)) {
                if (enable_vqmmc) {
                        ret = regulator_enable(mmc->supply.vqmmc);
                        host->sdhci_core_to_disable_vqmmc = !ret;
                }

                /* If vqmmc provides no 1.8V signalling, then there's no UHS */
                if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
                                                    1950000))
                        host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
                                         SDHCI_SUPPORT_SDR50 |
                                         SDHCI_SUPPORT_DDR50);

                /* In eMMC case vqmmc might be a fixed 1.8V regulator */
                if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
                                                    3600000))
                        host->flags &= ~SDHCI_SIGNALING_330;

                if (ret) {
                        pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
                                mmc_hostname(mmc), ret);
                        mmc->supply.vqmmc = ERR_PTR(-EINVAL);
                }
        }

        if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
                host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
                                 SDHCI_SUPPORT_DDR50);
                /*
                 * The SDHCI controller in a SoC might support HS200/HS400
                 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt
                 * property), but if the board is modeled such that the IO
                 * lines are not connected to 1.8V then HS200/HS400 cannot
                 * be supported. Disable HS200/HS400 if the board does not
                 * have 1.8V connected to the IO lines. (Applicable for
                 * other modes in 1.8V)
                 */
                mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
                mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
        }

        /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
        if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
                           SDHCI_SUPPORT_DDR50))
                mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

        /* SDR104 support also implies SDR50 support */
        if (host->caps1 & SDHCI_SUPPORT_SDR104) {
                mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
                /*
                 * SD3.0: SDR104 is supported, so (for eMMC) the caps2
                 * field can be promoted to support HS200.
                 */
                if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
                        mmc->caps2 |= MMC_CAP2_HS200;
        } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
                mmc->caps |= MMC_CAP_UHS_SDR50;
        }

        if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
            (host->caps1 & SDHCI_SUPPORT_HS400))
                mmc->caps2 |= MMC_CAP2_HS400;

        if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
            (IS_ERR(mmc->supply.vqmmc) ||
             !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
                                             1300000)))
                mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

        if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
            !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
                mmc->caps |= MMC_CAP_UHS_DDR50;

        /* Does the host need tuning for SDR50? */
        if (host->caps1 & SDHCI_USE_SDR50_TUNING)
                host->flags |= SDHCI_SDR50_NEEDS_TUNING;

        /* Driver Type(s) (A, C, D) supported by the host */
        if (host->caps1 & SDHCI_DRIVER_TYPE_A)
                mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
        if (host->caps1 & SDHCI_DRIVER_TYPE_C)
                mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
        if (host->caps1 & SDHCI_DRIVER_TYPE_D)
                mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

        /* Initial value for re-tuning timer count */
        host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
                                       host->caps1);

        /*
         * In case Re-tuning Timer is not disabled, the actual value of
         * re-tuning timer will be 2 ^ (n - 1).
         */
        if (host->tuning_count)
                host->tuning_count = 1 << (host->tuning_count - 1);
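        /*
         * Illustrative example (editorial addition, not in the original
         * source): a Capabilities field value of n = 4 gives a re-tuning
         * interval of 2 ^ (4 - 1) = 8, which the SDHCI specification
         * counts in seconds.
         */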
        /* Re-tuning mode supported by the Host Controller */
        host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);

        ocr_avail = 0;

        /*
         * According to SD Host Controller spec v3.00, if the Host System
         * can afford more than 150mA, Host Driver should set XPC to 1. Also
         * the value is meaningful only if Voltage Support in the Capabilities
         * register is set. The actual current value is 4 times the register
         * value.
         */
        max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
        if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
                int curr = regulator_get_current_limit(mmc->supply.vmmc);
                if (curr > 0) {

                        /* convert to SDHCI_MAX_CURRENT format */
                        curr = curr/1000;  /* convert to mA */
                        curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

                        curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
                        max_current_caps =
                                FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
                                FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
                                FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
                }
        }
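        /*
         * Illustrative example (editorial addition, not in the original
         * source): a regulator limit of 800000 uA is 800 mA, which divided
         * by the 4 mA-per-step multiplier encodes as 200; decoding below
         * multiplies by 4 again to recover 800 mA.
         */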
        if (host->caps & SDHCI_CAN_VDD_330) {
                ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

                mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
                                                 max_current_caps) *
                                       SDHCI_MAX_CURRENT_MULTIPLIER;
        }
        if (host->caps & SDHCI_CAN_VDD_300) {
                ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

                mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
                                                 max_current_caps) *
                                       SDHCI_MAX_CURRENT_MULTIPLIER;
        }
        if (host->caps & SDHCI_CAN_VDD_180) {
                ocr_avail |= MMC_VDD_165_195;

                mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
                                                 max_current_caps) *
                                       SDHCI_MAX_CURRENT_MULTIPLIER;
        }

        /* If OCR set by host, use it instead. */
        if (host->ocr_mask)
                ocr_avail = host->ocr_mask;

        /* If OCR set by external regulators, give it highest prio. */
        if (mmc->ocr_avail)
                ocr_avail = mmc->ocr_avail;

        mmc->ocr_avail = ocr_avail;
        mmc->ocr_avail_sdio = ocr_avail;
        if (host->ocr_avail_sdio)
                mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
        mmc->ocr_avail_sd = ocr_avail;
        if (host->ocr_avail_sd)
                mmc->ocr_avail_sd &= host->ocr_avail_sd;
        else /* normal SD controllers don't support 1.8V */
                mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
        mmc->ocr_avail_mmc = ocr_avail;
        if (host->ocr_avail_mmc)
                mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

        if (mmc->ocr_avail == 0) {
                pr_err("%s: Hardware doesn't report any supported voltages.\n",
                       mmc_hostname(mmc));
                ret = -ENODEV;
                goto unreg;
        }

        if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
                          MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
                          MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
            (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
                host->flags |= SDHCI_SIGNALING_180;

        if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
                host->flags |= SDHCI_SIGNALING_120;
        spin_lock_init(&host->lock);

        /*
         * Maximum number of sectors in one transfer. Limited by SDMA boundary
         * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
         * is less anyway.
         */
        mmc->max_req_size = 524288;

        /*
         * Maximum number of segments. Depends on if the hardware
         * can do scatter/gather or not.
         */
        if (host->flags & SDHCI_USE_ADMA) {
                mmc->max_segs = SDHCI_MAX_SEGS;
        } else if (host->flags & SDHCI_USE_SDMA) {
                mmc->max_segs = 1;
                mmc->max_req_size = min_t(size_t, mmc->max_req_size,
                                          dma_max_mapping_size(mmc_dev(mmc)));
        } else { /* PIO */
                mmc->max_segs = SDHCI_MAX_SEGS;
        }

        /*
         * Maximum segment size. Could be one segment with the maximum number
         * of bytes. When doing hardware scatter/gather, each entry cannot
         * be larger than 64 KiB though.
         */
        if (host->flags & SDHCI_USE_ADMA) {
                if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
                        host->max_adma = 65532; /* 32-bit alignment */
                        mmc->max_seg_size = 65535;
                } else {
                        mmc->max_seg_size = 65536;
                }
        } else {
                mmc->max_seg_size = mmc->max_req_size;
        }

        /*
         * Maximum block size. This varies from controller to controller and
         * is specified in the capabilities register.
         */
        if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
                mmc->max_blk_size = 2;
        } else {
                mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
                                SDHCI_MAX_BLOCK_SHIFT;
                if (mmc->max_blk_size >= 3) {
                        pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
                                mmc_hostname(mmc));
                        mmc->max_blk_size = 0;
                }
        }

        mmc->max_blk_size = 512 << mmc->max_blk_size;
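        /*
         * Illustrative example (editorial addition, not in the original
         * source): a capabilities field of 2 yields 512 << 2 = 2048 bytes;
         * field values of 3 or more are invalid and were reset to 0
         * (512 bytes) above.
         */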
        /*
         * Maximum block count.
         */
        mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

        if (mmc->max_segs == 1)
                /* This may alter mmc->*_blk_* parameters */
                sdhci_allocate_bounce_buffer(host);

        return 0;

unreg:
        if (host->sdhci_core_to_disable_vqmmc)
                regulator_disable(mmc->supply.vqmmc);
undma:
        if (host->align_buffer)
                dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);
        host->adma_table = NULL;
        host->align_buffer = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);
void sdhci_cleanup_host(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;

        if (host->sdhci_core_to_disable_vqmmc)
                regulator_disable(mmc->supply.vqmmc);

        if (host->align_buffer)
                dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);

        if (host->use_external_dma)
                sdhci_external_dma_release(host);

        host->adma_table = NULL;
        host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
int __sdhci_add_host(struct sdhci_host *host)
{
        unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
        struct mmc_host *mmc = host->mmc;
        int ret;

        if ((mmc->caps2 & MMC_CAP2_CQE) &&
            (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
                mmc->caps2 &= ~MMC_CAP2_CQE;
                mmc->cqe_ops = NULL;
        }

        host->complete_wq = alloc_workqueue("sdhci", flags, 0);
        if (!host->complete_wq)
                return -ENOMEM;

        INIT_WORK(&host->complete_work, sdhci_complete_work);

        timer_setup(&host->timer, sdhci_timeout_timer, 0);
        timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

        init_waitqueue_head(&host->buf_ready_int);

        sdhci_init(host, 0);

        ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
                                   IRQF_SHARED, mmc_hostname(mmc), host);
        if (ret) {
                pr_err("%s: Failed to request IRQ %d: %d\n",
                       mmc_hostname(mmc), host->irq, ret);
                goto unwq;
        }

        ret = sdhci_led_register(host);
        if (ret) {
                pr_err("%s: Failed to register LED device: %d\n",
                       mmc_hostname(mmc), ret);
                goto unirq;
        }

        ret = mmc_add_host(mmc);
        if (ret)
                goto unled;

        pr_info("%s: SDHCI controller on %s [%s] using %s\n",
                mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
                host->use_external_dma ? "External DMA" :
                (host->flags & SDHCI_USE_ADMA) ?
                (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
                (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

        sdhci_enable_card_detection(host);

        return 0;

unled:
        sdhci_led_unregister(host);
unirq:
        sdhci_reset_for_all(host);
        sdhci_writel(host, 0, SDHCI_INT_ENABLE);
        sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
        free_irq(host->irq, host);
unwq:
        destroy_workqueue(host->complete_wq);

        return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);
int sdhci_add_host(struct sdhci_host *host)
{
        int ret;

        ret = sdhci_setup_host(host);
        if (ret)
                return ret;

        ret = __sdhci_add_host(host);
        if (ret)
                goto cleanup;

        return 0;

cleanup:
        sdhci_cleanup_host(host);

        return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
        struct mmc_host *mmc = host->mmc;
        unsigned long flags;

        if (dead) {
                spin_lock_irqsave(&host->lock, flags);

                host->flags |= SDHCI_DEVICE_DEAD;

                if (sdhci_has_requests(host)) {
                        pr_err("%s: Controller removed during transfer!\n",
                               mmc_hostname(mmc));
                        sdhci_error_out_mrqs(host, -ENOMEDIUM);
                }

                spin_unlock_irqrestore(&host->lock, flags);
        }

        sdhci_disable_card_detection(host);

        mmc_remove_host(mmc);

        sdhci_led_unregister(host);

        if (!dead)
                sdhci_reset_for_all(host);

        sdhci_writel(host, 0, SDHCI_INT_ENABLE);
        sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
        free_irq(host->irq, host);

        del_timer_sync(&host->timer);
        del_timer_sync(&host->data_timer);

        destroy_workqueue(host->complete_wq);

        if (host->sdhci_core_to_disable_vqmmc)
                regulator_disable(mmc->supply.vqmmc);

        if (host->align_buffer)
                dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);

        if (host->use_external_dma)
                sdhci_external_dma_release(host);

        host->adma_table = NULL;
        host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
        mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 *                           Driver init/exit                                *
 *                                                                           *
\*****************************************************************************/
static int __init sdhci_drv_init(void)
{
        pr_info(DRIVER_NAME
                ": Secure Digital Host Controller Interface driver\n");
        pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

        return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <[email protected]>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");