tegra-xudc.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra XUSB device mode controller
 *
 * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2015, Google Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/role.h>
#include <linux/usb/phy.h>
#include <linux/workqueue.h>

/* XUSB_DEV registers */
#define DB 0x004
#define DB_TARGET_MASK GENMASK(15, 8)
#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
#define DB_STREAMID_MASK GENMASK(31, 16)
#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
#define ERSTSZ 0x008
#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
#define ERSTXBALO(x) (0x010 + 8 * (x))
#define ERSTXBAHI(x) (0x014 + 8 * (x))
#define ERDPLO 0x020
#define ERDPLO_EHB BIT(3)
#define ERDPHI 0x024
#define EREPLO 0x028
#define EREPLO_ECS BIT(0)
#define EREPLO_SEGI BIT(1)
#define EREPHI 0x02c
#define CTRL 0x030
#define CTRL_RUN BIT(0)
#define CTRL_LSE BIT(1)
#define CTRL_IE BIT(4)
#define CTRL_SMI_EVT BIT(5)
#define CTRL_SMI_DSE BIT(6)
#define CTRL_EWE BIT(7)
#define CTRL_DEVADDR_MASK GENMASK(30, 24)
#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
#define CTRL_ENABLE BIT(31)
#define ST 0x034
#define ST_RC BIT(0)
#define ST_IP BIT(4)
#define RT_IMOD 0x038
#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
#define PORTSC 0x03c
#define PORTSC_CCS BIT(0)
#define PORTSC_PED BIT(1)
#define PORTSC_PR BIT(4)
#define PORTSC_PLS_SHIFT 5
#define PORTSC_PLS_MASK GENMASK(8, 5)
#define PORTSC_PLS_U0 0x0
#define PORTSC_PLS_U2 0x2
#define PORTSC_PLS_U3 0x3
#define PORTSC_PLS_DISABLED 0x4
#define PORTSC_PLS_RXDETECT 0x5
#define PORTSC_PLS_INACTIVE 0x6
#define PORTSC_PLS_RESUME 0xf
#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
#define PORTSC_PS_SHIFT 10
#define PORTSC_PS_MASK GENMASK(13, 10)
#define PORTSC_PS_UNDEFINED 0x0
#define PORTSC_PS_FS 0x1
#define PORTSC_PS_LS 0x2
#define PORTSC_PS_HS 0x3
#define PORTSC_PS_SS 0x4
#define PORTSC_LWS BIT(16)
#define PORTSC_CSC BIT(17)
#define PORTSC_WRC BIT(19)
#define PORTSC_PRC BIT(21)
#define PORTSC_PLC BIT(22)
#define PORTSC_CEC BIT(23)
#define PORTSC_WPR BIT(30)
#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
			    PORTSC_PLC | PORTSC_CEC)
#define ECPLO 0x040
#define ECPHI 0x044
#define MFINDEX 0x048
#define MFINDEX_FRAME_SHIFT 3
#define MFINDEX_FRAME_MASK GENMASK(13, 3)
#define PORTPM 0x04c
#define PORTPM_L1S_MASK GENMASK(1, 0)
#define PORTPM_L1S_DROP 0x0
#define PORTPM_L1S_ACCEPT 0x1
#define PORTPM_L1S_NYET 0x2
#define PORTPM_L1S_STALL 0x3
#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
#define PORTPM_RWE BIT(3)
#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
#define PORTPM_FLA BIT(24)
#define PORTPM_VBA BIT(25)
#define PORTPM_WOC BIT(26)
#define PORTPM_WOD BIT(27)
#define PORTPM_U1E BIT(28)
#define PORTPM_U2E BIT(29)
#define PORTPM_FRWE BIT(30)
#define PORTPM_PNG_CYA BIT(31)
#define EP_HALT 0x050
#define EP_PAUSE 0x054
#define EP_RELOAD 0x058
#define EP_STCHG 0x05c
#define DEVNOTIF_LO 0x064
#define DEVNOTIF_LO_TRIG BIT(0)
#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
#define DEVNOTIF_HI 0x068
#define PORTHALT 0x06c
#define PORTHALT_HALT_LTSSM BIT(0)
#define PORTHALT_HALT_REJECT BIT(1)
#define PORTHALT_STCHG_REQ BIT(20)
#define PORTHALT_STCHG_INTR_EN BIT(24)
#define PORT_TM 0x070
#define EP_THREAD_ACTIVE 0x074
#define EP_STOPPED 0x078
#define HSFSPI_COUNT0 0x100
#define HSFSPI_COUNT13 0x134
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
	HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
#define BLCG 0x840
#define SSPX_CORE_CNT0 0x610
#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
#define SSPX_CORE_CNT30 0x688
#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
	SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
#define SSPX_CORE_CNT32 0x690
#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
	SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
#define SSPX_CORE_CNT56 0x6fc
#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(x) ((x) & \
	SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK)
#define SSPX_CORE_CNT57 0x700
#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(x) ((x) & \
	SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK)
#define SSPX_CORE_CNT65 0x720
#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(x) ((x) & \
	SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK)
#define SSPX_CORE_CNT66 0x724
#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(x) ((x) & \
	SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK)
#define SSPX_CORE_CNT67 0x728
#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(x) ((x) & \
	SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK)
#define SSPX_CORE_CNT72 0x73c
#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(x) ((x) & \
	SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
	SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
#define BLCG_DFPCI BIT(0)
#define BLCG_UFPCI BIT(1)
#define BLCG_FE BIT(2)
#define BLCG_COREPLL_PWRDN BIT(8)
#define BLCG_IOPLL_0_PWRDN BIT(9)
#define BLCG_IOPLL_1_PWRDN BIT(10)
#define BLCG_IOPLL_2_PWRDN BIT(11)
#define BLCG_ALL 0x1ff
#define CFG_DEV_SSPI_XFER 0x858
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
	CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
#define CFG_DEV_FE 0x85c
#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)

/* FPCI registers */
#define XUSB_DEV_CFG_1 0x004
#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
#define XUSB_DEV_CFG_4 0x010
#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
#define XUSB_DEV_CFG_5 0x014

/* IPFS registers */
#define XUSB_DEV_CONFIGURATION_0 0x180
#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
#define XUSB_DEV_INTR_MASK_0 0x188
#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)

struct tegra_xudc_ep_context {
	__le32 info0;
	__le32 info1;
	__le32 deq_lo;
	__le32 deq_hi;
	__le32 tx_info;
	__le32 rsvd[11];
};

#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4

#define EP_TYPE_INVALID 0
#define EP_TYPE_ISOCH_OUT 1
#define EP_TYPE_BULK_OUT 2
#define EP_TYPE_INTERRUPT_OUT 3
#define EP_TYPE_CONTROL 4
#define EP_TYPE_ISCOH_IN 5
#define EP_TYPE_BULK_IN 6
#define EP_TYPE_INTERRUPT_IN 7

#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
{ \
	return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
} \
static inline void \
ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
{ \
	u32 tmp; \
\
	tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
	tmp |= (val & (mask)) << (shift); \
	ctx->member = cpu_to_le32(tmp); \
}
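
/*
 * For example, BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7) below expands to
 * ep_ctx_read_state() and ep_ctx_write_state(), which read-modify-write
 * bits 2:0 of the little-endian info0 word. Every accessor that follows is
 * generated the same way: a le32 load, a shift, and a mask.
 */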
BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1)
BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1)
BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f)
BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)

static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
{
	return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
		(ep_ctx_read_deq_lo(ctx) << 4);
}

static inline void
ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
{
	ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
	ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
}
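
/*
 * The dequeue pointer is 16-byte aligned: deq_lo only stores bits 31:4 of
 * the address, so ep_ctx_write_deq_ptr() drops the low four bits and
 * ep_ctx_read_deq_ptr() returns them as zero.
 */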
struct tegra_xudc_trb {
	__le32 data_lo;
	__le32 data_hi;
	__le32 status;
	__le32 control;
};

#define TRB_TYPE_RSVD 0
#define TRB_TYPE_NORMAL 1
#define TRB_TYPE_SETUP_STAGE 2
#define TRB_TYPE_DATA_STAGE 3
#define TRB_TYPE_STATUS_STAGE 4
#define TRB_TYPE_ISOCH 5
#define TRB_TYPE_LINK 6
#define TRB_TYPE_TRANSFER_EVENT 32
#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
#define TRB_TYPE_STREAM 48
#define TRB_TYPE_SETUP_PACKET_EVENT 63

#define TRB_CMPL_CODE_INVALID 0
#define TRB_CMPL_CODE_SUCCESS 1
#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
#define TRB_CMPL_CODE_USB_TRANS_ERR 4
#define TRB_CMPL_CODE_TRB_ERR 5
#define TRB_CMPL_CODE_STALL 6
#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
#define TRB_CMPL_CODE_SHORT_PACKET 13
#define TRB_CMPL_CODE_RING_UNDERRUN 14
#define TRB_CMPL_CODE_RING_OVERRUN 15
#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
#define TRB_CMPL_CODE_STOPPED 26
#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
#define TRB_CMPL_CODE_HOST_REJECTED 221
#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223

#define BUILD_TRB_RW(name, member, shift, mask) \
static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
{ \
	return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
} \
static inline void \
trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
{ \
	u32 tmp; \
\
	tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
	tmp |= (val & (mask)) << (shift); \
	trb->member = cpu_to_le32(tmp); \
}

BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
BUILD_TRB_RW(seq_num, status, 0, 0xffff)
BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
BUILD_TRB_RW(td_size, status, 17, 0x1f)
BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
BUILD_TRB_RW(cycle, control, 0, 0x1)
BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
BUILD_TRB_RW(isp, control, 2, 0x1)
BUILD_TRB_RW(chain, control, 4, 0x1)
BUILD_TRB_RW(ioc, control, 5, 0x1)
BUILD_TRB_RW(type, control, 10, 0x3f)
BUILD_TRB_RW(stream_id, control, 16, 0xffff)
BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
BUILD_TRB_RW(tlbpc, control, 16, 0xf)
BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
BUILD_TRB_RW(sia, control, 31, 0x1)
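
/*
 * Several of these accessors alias the same bits, and which one applies
 * depends on the TRB type: seq_num, transfer_len and td_size overlap in
 * 'status', while stream_id, endpoint_id, tlbpc and data_stage_dir all
 * start at bit 16 of 'control'.
 */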
static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
{
	return ((u64)trb_read_data_hi(trb) << 32) |
		trb_read_data_lo(trb);
}

static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
{
	trb_write_data_lo(trb, lower_32_bits(addr));
	trb_write_data_hi(trb, upper_32_bits(addr));
}

struct tegra_xudc_request {
	struct usb_request usb_req;

	size_t buf_queued;
	unsigned int trbs_queued;
	unsigned int trbs_needed;
	bool need_zlp;

	struct tegra_xudc_trb *first_trb;
	struct tegra_xudc_trb *last_trb;

	struct list_head list;
};

struct tegra_xudc_ep {
	struct tegra_xudc *xudc;
	struct usb_ep usb_ep;
	unsigned int index;
	char name[8];

	struct tegra_xudc_ep_context *context;

#define XUDC_TRANSFER_RING_SIZE 64
	struct tegra_xudc_trb *transfer_ring;
	dma_addr_t transfer_ring_phys;

	unsigned int enq_ptr;
	unsigned int deq_ptr;
	bool pcs;
	bool ring_full;
	bool stream_rejected;

	struct list_head queue;
	const struct usb_endpoint_descriptor *desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc;
};

struct tegra_xudc_sel_timing {
	__u8 u1sel;
	__u8 u1pel;
	__le16 u2sel;
	__le16 u2pel;
};

enum tegra_xudc_setup_state {
	WAIT_FOR_SETUP,
	DATA_STAGE_XFER,
	DATA_STAGE_RECV,
	STATUS_STAGE_XFER,
	STATUS_STAGE_RECV,
};

struct tegra_xudc_setup_packet {
	struct usb_ctrlrequest ctrl_req;
	unsigned int seq_num;
};

struct tegra_xudc_save_regs {
	u32 ctrl;
	u32 portpm;
};

struct tegra_xudc {
	struct device *dev;
	const struct tegra_xudc_soc *soc;
	struct tegra_xusb_padctl *padctl;

	spinlock_t lock;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

#define XUDC_NR_EVENT_RINGS 2
#define XUDC_EVENT_RING_SIZE 4096
	struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
	dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
	unsigned int event_ring_index;
	unsigned int event_ring_deq_ptr;
	bool ccs;

#define XUDC_NR_EPS 32
	struct tegra_xudc_ep ep[XUDC_NR_EPS];
	struct tegra_xudc_ep_context *ep_context;
	dma_addr_t ep_context_phys;

	struct device *genpd_dev_device;
	struct device *genpd_dev_ss;
	struct device_link *genpd_dl_device;
	struct device_link *genpd_dl_ss;

	struct dma_pool *transfer_ring_pool;

	bool queued_setup_packet;
	struct tegra_xudc_setup_packet setup_packet;
	enum tegra_xudc_setup_state setup_state;
	u16 setup_seq_num;

	u16 dev_addr;
	u16 isoch_delay;
	struct tegra_xudc_sel_timing sel_timing;
	u8 test_mode_pattern;
	u16 status_buf;
	struct tegra_xudc_request *ep0_req;

	bool pullup;

	unsigned int nr_enabled_eps;
	unsigned int nr_isoch_eps;

	unsigned int device_state;
	unsigned int resume_state;

	int irq;

	void __iomem *base;
	resource_size_t phys_base;
	void __iomem *ipfs;
	void __iomem *fpci;

	struct regulator_bulk_data *supplies;

	struct clk_bulk_data *clks;

	bool device_mode;
	struct work_struct usb_role_sw_work;

	struct phy **usb3_phy;
	struct phy *curr_usb3_phy;
	struct phy **utmi_phy;
	struct phy *curr_utmi_phy;

	struct tegra_xudc_save_regs saved_regs;
	bool suspended;
	bool powergated;

	struct usb_phy **usbphy;
	struct usb_phy *curr_usbphy;
	struct notifier_block vbus_nb;

	struct completion disconnect_complete;

	bool selfpowered;

#define TOGGLE_VBUS_WAIT_MS 100
	struct delayed_work plc_reset_work;
	bool wait_csc;

	struct delayed_work port_reset_war_work;
	bool wait_for_sec_prc;
};

#define XUDC_TRB_MAX_BUFFER_SIZE 65536
#define XUDC_MAX_ISOCH_EPS 4
#define XUDC_INTERRUPT_MODERATION_US 0

static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
};
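
/*
 * Note: the ep0 descriptor above is deliberately not const. The 64-byte
 * wMaxPacketSize is only a default; the driver updates it once the
 * connection speed is known (512 bytes for SuperSpeed).
 */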
struct tegra_xudc_soc {
	const char * const *supply_names;
	unsigned int num_supplies;
	const char * const *clock_names;
	unsigned int num_clks;
	unsigned int num_phys;
	bool u1_enable;
	bool u2_enable;
	bool lpm_enable;
	bool invalid_seq_num;
	bool pls_quirk;
	bool port_reset_quirk;
	bool port_speed_quirk;
	bool has_ipfs;
};

static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->fpci + offset);
}

static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->fpci + offset);
}

static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->ipfs + offset);
}

static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->ipfs + offset);
}

static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->base + offset);
}

static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->base + offset);
}

static inline int xudc_readl_poll(struct tegra_xudc *xudc,
				  unsigned int offset, u32 mask, u32 val)
{
	u32 regval;

	return readl_poll_timeout_atomic(xudc->base + offset, regval,
					 (regval & mask) == val, 1, 100);
}
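
/*
 * xudc_readl_poll() spins in atomic context, re-reading the register every
 * microsecond until (value & mask) == val, and gives up with -ETIMEDOUT
 * after 100 us. It backs the short, bounded waits on the endpoint
 * state-change registers used throughout this driver.
 */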
static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
{
	return container_of(gadget, struct tegra_xudc, gadget);
}

static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
{
	return container_of(ep, struct tegra_xudc_ep, usb_ep);
}

static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
{
	return container_of(req, struct tegra_xudc_request, usb_req);
}

static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
			    struct tegra_xudc_trb *trb)
{
	dev_dbg(xudc->dev,
		"%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
		type, trb, trb->data_lo, trb->data_hi, trb->status,
		trb->control);
}

static void tegra_xudc_limit_port_speed(struct tegra_xudc *xudc)
{
	u32 val;

	/* limit port speed to gen 1 */
	val = xudc_readl(xudc, SSPX_CORE_CNT56);
	val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x260);
	xudc_writel(xudc, val, SSPX_CORE_CNT56);

	val = xudc_readl(xudc, SSPX_CORE_CNT57);
	val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x6D6);
	xudc_writel(xudc, val, SSPX_CORE_CNT57);

	val = xudc_readl(xudc, SSPX_CORE_CNT65);
	val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);

	val = xudc_readl(xudc, SSPX_CORE_CNT66);
	val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);

	val = xudc_readl(xudc, SSPX_CORE_CNT67);
	val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT67);

	val = xudc_readl(xudc, SSPX_CORE_CNT72);
	val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
	val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x10);
	xudc_writel(xudc, val, SSPX_CORE_CNT72);
}

static void tegra_xudc_restore_port_speed(struct tegra_xudc *xudc)
{
	u32 val;

	/* restore port speed to gen2 */
	val = xudc_readl(xudc, SSPX_CORE_CNT56);
	val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x438);
	xudc_writel(xudc, val, SSPX_CORE_CNT56);

	val = xudc_readl(xudc, SSPX_CORE_CNT57);
	val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x528);
	xudc_writel(xudc, val, SSPX_CORE_CNT57);

	val = xudc_readl(xudc, SSPX_CORE_CNT65);
	val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0xE10);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);

	val = xudc_readl(xudc, SSPX_CORE_CNT66);
	val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x348);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);

	val = xudc_readl(xudc, SSPX_CORE_CNT67);
	val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x5a0);
	xudc_writel(xudc, val, SSPX_CORE_CNT67);

	val = xudc_readl(xudc, SSPX_CORE_CNT72);
	val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
	val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x1c21);
	xudc_writel(xudc, val, SSPX_CORE_CNT72);
}

static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
	int err;

	pm_runtime_get_sync(xudc->dev);

	tegra_phy_xusb_utmi_pad_power_on(xudc->curr_utmi_phy);

	err = phy_power_on(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "UTMI power on failed: %d\n", err);

	err = phy_power_on(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "USB3 PHY power on failed: %d\n", err);

	dev_dbg(xudc->dev, "device mode on\n");

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
			 USB_ROLE_DEVICE);
}

static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
{
	bool connected = false;
	u32 pls, val;
	int err;

	dev_dbg(xudc->dev, "device mode off\n");

	connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);

	reinit_completion(&xudc->disconnect_complete);

	if (xudc->soc->port_speed_quirk)
		tegra_xudc_restore_port_speed(xudc);

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);

	pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
		PORTSC_PLS_SHIFT;

	/* Direct link to U0 if disconnected in RESUME or U2. */
	if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
	    (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
		val = xudc_readl(xudc, PORTPM);
		val |= PORTPM_FRWE;
		xudc_writel(xudc, val, PORTPM);

		val = xudc_readl(xudc, PORTSC);
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	/* Wait for disconnect event. */
	if (connected)
		wait_for_completion(&xudc->disconnect_complete);

	/* Make sure interrupt handler has completed before powergating. */
	synchronize_irq(xudc->irq);

	tegra_phy_xusb_utmi_pad_power_down(xudc->curr_utmi_phy);

	err = phy_power_off(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);

	err = phy_power_off(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "USB3 PHY power off failed: %d\n", err);

	pm_runtime_put(xudc->dev);
}

static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
{
	struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
					       usb_role_sw_work);

	if (xudc->device_mode)
		tegra_xudc_device_mode_on(xudc);
	else
		tegra_xudc_device_mode_off(xudc);
}

static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
				    struct usb_phy *usbphy)
{
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
			return i;
	}
	dev_info(xudc->dev, "phy index could not be found for shared USB PHY\n");

	return -1;
}

static int tegra_xudc_vbus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
					       vbus_nb);
	struct usb_phy *usbphy = (struct usb_phy *)data;
	int phy_index;

	dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);

	if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
	    (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
		dev_dbg(xudc->dev, "Same role(%d) received. Ignore\n",
			xudc->device_mode);
		return NOTIFY_OK;
	}

	xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS);
	phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
	dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
		phy_index);

	if (!xudc->suspended && phy_index != -1) {
		xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
		xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
		xudc->curr_usbphy = usbphy;
		schedule_work(&xudc->usb_role_sw_work);
	}

	return NOTIFY_OK;
}

static void tegra_xudc_plc_reset_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
					       plc_reset_work);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->wait_csc) {
		u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;

		if (pls == PORTSC_PLS_INACTIVE) {
			dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_NONE);
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_DEVICE);

			xudc->wait_csc = false;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}

static void tegra_xudc_port_reset_war_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc =
		container_of(dwork, struct tegra_xudc, port_reset_war_work);
	unsigned long flags;
	u32 pls;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->device_mode && xudc->wait_for_sec_prc) {
		pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;
		dev_dbg(xudc->dev, "pls = %x\n", pls);

		if (pls == PORTSC_PLS_DISABLED) {
			dev_dbg(xudc->dev, "toggle vbus\n");
			/* PRC doesn't complete in 100ms, toggle the vbus */
			ret = tegra_phy_xusb_utmi_port_reset(
				xudc->curr_utmi_phy);
			if (ret == 1)
				xudc->wait_for_sec_prc = 0;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}

static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
				   struct tegra_xudc_trb *trb)
{
	unsigned int index;

	index = trb - ep->transfer_ring;

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return 0;

	return (ep->transfer_ring_phys + index * sizeof(*trb));
}

static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
					       dma_addr_t addr)
{
	struct tegra_xudc_trb *trb;
	unsigned int index;

	index = (addr - ep->transfer_ring_phys) / sizeof(*trb);

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return NULL;

	trb = &ep->transfer_ring[index];

	return trb;
}

static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_writel(xudc, BIT(ep), EP_RELOAD);
	xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
}
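
/*
 * The pause/halt helpers below share a handshake with the controller: set
 * or clear the per-endpoint bit, poll EP_STCHG until the hardware
 * acknowledges the state change, then write the same bit back to EP_STCHG
 * to clear the change status.
 */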
static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);

	xudc_writel(xudc, 0, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);
	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);
	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!val)
		return;
	xudc_writel(xudc, 0, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
	xudc_writel(xudc, BIT(ep), EP_STOPPED);
}

static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
}

static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
				struct tegra_xudc_request *req, int status)
{
	struct tegra_xudc *xudc = ep->xudc;

	dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
		req, ep->index, status);

	if (likely(req->usb_req.status == -EINPROGRESS))
		req->usb_req.status = status;

	list_del_init(&req->list);

	if (usb_endpoint_xfer_control(ep->desc)) {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 (xudc->setup_state ==
					  DATA_STAGE_XFER));
	} else {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 usb_endpoint_dir_in(ep->desc));
	}

	spin_unlock(&xudc->lock);
	usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
	spin_lock(&xudc->lock);
}

static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
{
	struct tegra_xudc_request *req;

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		tegra_xudc_req_done(ep, req, status);
	}
}

static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
{
	if (ep->ring_full)
		return 0;

	if (ep->deq_ptr > ep->enq_ptr)
		return ep->deq_ptr - ep->enq_ptr - 1;

	return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
}
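
/*
 * One slot is always left empty so that a full ring can be distinguished
 * from an empty one, and the entry at XUDC_TRANSFER_RING_SIZE - 1 is
 * reserved for the link TRB, hence the -1/-2 in the arithmetic above.
 */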
static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
				     struct tegra_xudc_request *req,
				     struct tegra_xudc_trb *trb,
				     bool ioc)
{
	struct tegra_xudc *xudc = ep->xudc;
	dma_addr_t buf_addr;
	size_t len;

	len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
		    req->buf_queued);
	if (len > 0)
		buf_addr = req->usb_req.dma + req->buf_queued;
	else
		buf_addr = 0;

	trb_write_data_ptr(trb, buf_addr);

	trb_write_transfer_len(trb, len);
	trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);

	if (req->trbs_queued == req->trbs_needed - 1 ||
	    (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
		trb_write_chain(trb, 0);
	else
		trb_write_chain(trb, 1);

	trb_write_ioc(trb, ioc);

	if (usb_endpoint_dir_out(ep->desc) ||
	    (usb_endpoint_xfer_control(ep->desc) &&
	     (xudc->setup_state == DATA_STAGE_RECV)))
		trb_write_isp(trb, 1);
	else
		trb_write_isp(trb, 0);

	if (usb_endpoint_xfer_control(ep->desc)) {
		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == DATA_STAGE_RECV)
			trb_write_type(trb, TRB_TYPE_DATA_STAGE);
		else
			trb_write_type(trb, TRB_TYPE_STATUS_STAGE);

		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == STATUS_STAGE_XFER)
			trb_write_data_stage_dir(trb, 1);
		else
			trb_write_data_stage_dir(trb, 0);
	} else if (usb_endpoint_xfer_isoc(ep->desc)) {
		trb_write_type(trb, TRB_TYPE_ISOCH);
		trb_write_sia(trb, 1);
		trb_write_frame_id(trb, 0);
		trb_write_tlbpc(trb, 0);
	} else if (usb_ss_max_streams(ep->comp_desc)) {
		trb_write_type(trb, TRB_TYPE_STREAM);
		trb_write_stream_id(trb, req->usb_req.stream_id);
	} else {
		trb_write_type(trb, TRB_TYPE_NORMAL);
		trb_write_stream_id(trb, 0);
	}

	trb_write_cycle(trb, ep->pcs);

	req->trbs_queued++;
	req->buf_queued += len;

	dump_trb(xudc, "TRANSFER", trb);
}

static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
					  struct tegra_xudc_request *req)
{
	unsigned int i, count, available;
	bool wait_td = false;

	available = ep_available_trbs(ep);
	count = req->trbs_needed - req->trbs_queued;
	if (available < count) {
		count = available;
		ep->ring_full = true;
	}
	/*
	 * To generate a zero-length packet on the USB bus, SW needs to
	 * schedule a standalone zero-length TD. According to HW's behavior,
	 * SW needs to schedule TDs in different ways for different endpoint
	 * types.
	 *
	 * For control endpoint:
	 * - Data stage TD (IOC = 1, CH = 0)
	 * - Ring doorbell and wait for transfer event
	 * - Data stage TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 *
	 * For bulk and interrupt endpoints:
	 * - Normal transfer TD (IOC = 0, CH = 0)
	 * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 */
  977. if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
  978. wait_td = true;
  979. if (!req->first_trb)
  980. req->first_trb = &ep->transfer_ring[ep->enq_ptr];
  981. for (i = 0; i < count; i++) {
  982. struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
  983. bool ioc = false;
  984. if ((i == count - 1) || (wait_td && i == count - 2))
  985. ioc = true;
  986. tegra_xudc_queue_one_trb(ep, req, trb, ioc);
  987. req->last_trb = trb;
  988. ep->enq_ptr++;
  989. if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
  990. trb = &ep->transfer_ring[ep->enq_ptr];
  991. trb_write_cycle(trb, ep->pcs);
  992. ep->pcs = !ep->pcs;
  993. ep->enq_ptr = 0;
  994. }
  995. if (ioc)
  996. break;
  997. }
  998. return count;
  999. }
  1000. static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
  1001. {
  1002. struct tegra_xudc *xudc = ep->xudc;
  1003. u32 val;
  1004. if (list_empty(&ep->queue))
  1005. return;
  1006. val = DB_TARGET(ep->index);
  1007. if (usb_endpoint_xfer_control(ep->desc)) {
  1008. val |= DB_STREAMID(xudc->setup_seq_num);
  1009. } else if (usb_ss_max_streams(ep->comp_desc) > 0) {
  1010. struct tegra_xudc_request *req;
  1011. /* Don't ring doorbell if the stream has been rejected. */
  1012. if (ep->stream_rejected)
  1013. return;
  1014. req = list_first_entry(&ep->queue, struct tegra_xudc_request,
  1015. list);
  1016. val |= DB_STREAMID(req->usb_req.stream_id);
  1017. }
  1018. dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
  1019. xudc_writel(xudc, val, DB);
  1020. }
  1021. static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
  1022. {
  1023. struct tegra_xudc_request *req;
  1024. bool trbs_queued = false;
  1025. list_for_each_entry(req, &ep->queue, list) {
  1026. if (ep->ring_full)
  1027. break;
  1028. if (tegra_xudc_queue_trbs(ep, req) > 0)
  1029. trbs_queued = true;
  1030. }
  1031. if (trbs_queued)
  1032. tegra_xudc_ep_ring_doorbell(ep);
  1033. }
  1034. static int
  1035. __tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
  1036. {
  1037. struct tegra_xudc *xudc = ep->xudc;
  1038. int err;
  1039. if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
  1040. dev_err(xudc->dev, "control EP has pending transfers\n");
  1041. return -EINVAL;
  1042. }
  1043. if (usb_endpoint_xfer_control(ep->desc)) {
  1044. err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
  1045. (xudc->setup_state ==
  1046. DATA_STAGE_XFER));
  1047. } else {
  1048. err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
  1049. usb_endpoint_dir_in(ep->desc));
  1050. }
  1051. if (err < 0) {
  1052. dev_err(xudc->dev, "failed to map request: %d\n", err);
  1053. return err;
  1054. }
  1055. req->first_trb = NULL;
  1056. req->last_trb = NULL;
  1057. req->buf_queued = 0;
  1058. req->trbs_queued = 0;
  1059. req->need_zlp = false;
  1060. req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
  1061. XUDC_TRB_MAX_BUFFER_SIZE);
  1062. if (req->usb_req.length == 0)
  1063. req->trbs_needed++;
  1064. if (!usb_endpoint_xfer_isoc(ep->desc) &&
  1065. req->usb_req.zero && req->usb_req.length &&
  1066. ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
  1067. req->trbs_needed++;
  1068. req->need_zlp = true;
  1069. }
  1070. req->usb_req.status = -EINPROGRESS;
  1071. req->usb_req.actual = 0;
  1072. list_add_tail(&req->list, &ep->queue);
  1073. tegra_xudc_ep_kick_queue(ep);
  1074. return 0;
  1075. }

static int
tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
		    gfp_t gfp)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_queue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
				  struct tegra_xudc_request *req)
{
	struct tegra_xudc_trb *trb = req->first_trb;
	bool pcs_enq = trb_read_cycle(trb);
	bool pcs;

	/*
	 * Clear out all the TRBs that are part of or follow the cancelled
	 * request, and reset each TRB's cycle bit to its un-enqueued state.
	 */
	while (trb != &ep->transfer_ring[ep->enq_ptr]) {
		pcs = trb_read_cycle(trb);
		memset(trb, 0, sizeof(*trb));
		trb_write_cycle(trb, !pcs);
		trb++;

		if (trb_read_type(trb) == TRB_TYPE_LINK)
			trb = ep->transfer_ring;
	}

	/* Requests will be re-queued at the start of the cancelled request. */
	ep->enq_ptr = req->first_trb - ep->transfer_ring;
	/*
	 * Retrieve the correct cycle bit state from the first TRB of
	 * the cancelled request.
	 */
	ep->pcs = pcs_enq;
	ep->ring_full = false;
	list_for_each_entry_continue(req, &ep->queue, list) {
		req->usb_req.status = -EINPROGRESS;
		req->usb_req.actual = 0;
		req->first_trb = NULL;
		req->last_trb = NULL;
		req->buf_queued = 0;
		req->trbs_queued = 0;
	}
}

/*
 * Determine if the given TRB is in the range [first trb, last trb] for the
 * given request.
 */
static bool trb_in_request(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_request *req,
			   struct tegra_xudc_trb *trb)
{
	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
		req->first_trb, req->last_trb, trb);
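
	/*
	 * req->last_trb < req->first_trb means the request wraps around the
	 * end of the ring, so the range to test is [first_trb, ring end]
	 * plus [ring start, last_trb].
	 */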
	if (trb >= req->first_trb && (trb <= req->last_trb ||
				      req->last_trb < req->first_trb))
		return true;

	if (trb < req->first_trb && trb <= req->last_trb &&
	    req->last_trb < req->first_trb)
		return true;

	return false;
}

/*
 * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
 * for the given endpoint and request.
 */
static bool trb_before_request(struct tegra_xudc_ep *ep,
			       struct tegra_xudc_request *req,
			       struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];

	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
		__func__, req->first_trb, req->last_trb, enq_trb, trb);

	if (trb < req->first_trb && (enq_trb <= trb ||
				     req->first_trb < enq_trb))
		return true;

	if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
		return true;

	return false;
}

static int
__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
			struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	struct tegra_xudc_request *r = NULL, *iter;
	struct tegra_xudc_trb *deq_trb;
	bool busy, kick_queue = false;
	int ret = 0;

	/* Make sure the request is actually queued to this endpoint. */
	list_for_each_entry(iter, &ep->queue, list) {
		if (iter != req)
			continue;
		r = iter;
		break;
	}

	if (!r)
		return -EINVAL;

	/* Request hasn't been queued in the transfer ring yet. */
	if (!req->trbs_queued) {
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		return 0;
	}

	/* Halt DMA for this endpoint. */
	if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
		ep_pause(xudc, ep->index);
		ep_wait_for_inactive(xudc, ep->index);
	}

	deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
	/* Is the hardware processing the TRB at the dequeue pointer? */
	busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));

	if (trb_in_request(ep, req, deq_trb) && busy) {
		/*
		 * Request has been partially completed or it hasn't
		 * started processing yet.
		 */
		dma_addr_t deq_ptr;

		squeeze_transfer_ring(ep, req);

		req->usb_req.actual = ep_ctx_read_edtla(ep->context);
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;

		/* EDTLA is > 0: request has been partially completed */
		if (req->usb_req.actual > 0) {
			/*
			 * Abort the pending transfer and update the dequeue
			 * pointer
			 */
			ep_ctx_write_edtla(ep->context, 0);
			ep_ctx_write_partial_td(ep->context, 0);
			ep_ctx_write_data_offset(ep->context, 0);

			deq_ptr = trb_virt_to_phys(ep,
					&ep->transfer_ring[ep->enq_ptr]);

			if (dma_mapping_error(xudc->dev, deq_ptr)) {
				ret = -EINVAL;
			} else {
				ep_ctx_write_deq_ptr(ep->context, deq_ptr);
				ep_ctx_write_dcs(ep->context, ep->pcs);
				ep_reload(xudc, ep->index);
			}
		}
	} else if (trb_before_request(ep, req, deq_trb) && busy) {
		/* Request hasn't started processing yet. */
		squeeze_transfer_ring(ep, req);

		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;
	} else {
		/*
		 * Request has completed, but we haven't processed the
		 * completion event yet.
		 */
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		ret = -EINVAL;
	}

	/* Resume the endpoint. */
	ep_unpause(xudc, ep->index);

	if (kick_queue)
		tegra_xudc_ep_kick_queue(ep);

	return ret;
}

static int
tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_dequeue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (!ep->desc)
		return -EINVAL;

	if (usb_endpoint_xfer_isoc(ep->desc)) {
		dev_err(xudc->dev, "can't halt isochronous EP\n");
		return -ENOTSUPP;
	}

	if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
		dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
			halt ? "halted" : "not halted");
		return 0;
	}

	if (halt) {
		ep_halt(xudc, ep->index);
	} else {
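		/*
		 * Clearing a halt also clears the endpoint's sequence number
		 * and split transaction state, so cycle the endpoint context
		 * through DISABLED and back to RUNNING, reloading it so the
		 * hardware picks up the reset state.
		 */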
		ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

		ep_reload(xudc, ep->index);

		ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
		ep_ctx_write_rsvd(ep->context, 0);
		ep_ctx_write_partial_td(ep->context, 0);
		ep_ctx_write_splitxstate(ep->context, 0);
		ep_ctx_write_seq_num(ep->context, 0);

		ep_reload(xudc, ep->index);
		ep_unpause(xudc, ep->index);
		ep_unhalt(xudc, ep->index);

		tegra_xudc_ep_ring_doorbell(ep);
	}

	return 0;
}

static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	if (value && usb_endpoint_dir_in(ep->desc) &&
	    !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "can't halt EP with requests pending\n");
		ret = -EAGAIN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_set_halt(ep, value);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
{
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	struct tegra_xudc *xudc = ep->xudc;
	u16 maxpacket, maxburst = 0, esit = 0;
	u32 val;

	maxpacket = usb_endpoint_maxp(desc);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (!usb_endpoint_xfer_control(desc))
			maxburst = comp_desc->bMaxBurst;

		if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
			esit = le16_to_cpu(comp_desc->wBytesPerInterval);
	} else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		   (usb_endpoint_xfer_int(desc) ||
		    usb_endpoint_xfer_isoc(desc))) {
		if (xudc->gadget.speed == USB_SPEED_HIGH) {
			maxburst = usb_endpoint_maxp_mult(desc) - 1;
			if (maxburst == 0x3) {
				dev_warn(xudc->dev,
					 "invalid endpoint maxburst\n");
				maxburst = 0x2;
			}
		}
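
		/*
		 * For HS/FS periodic endpoints, the max ESIT payload is the
		 * packet size times the number of transactions per service
		 * interval (maxburst + 1).
		 */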
		esit = maxpacket * (maxburst + 1);
	}

	memset(ep->context, 0, sizeof(*ep->context));

	ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
	ep_ctx_write_interval(ep->context, desc->bInterval);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (usb_endpoint_xfer_isoc(desc)) {
			ep_ctx_write_mult(ep->context,
					  comp_desc->bmAttributes & 0x3);
		}

		if (usb_endpoint_xfer_bulk(desc)) {
			ep_ctx_write_max_pstreams(ep->context,
						  comp_desc->bmAttributes &
						  0x1f);
			ep_ctx_write_lsa(ep->context, 1);
		}
	}

	if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
		val = usb_endpoint_type(desc);
	else
		val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;

	ep_ctx_write_type(ep->context, val);
	ep_ctx_write_cerr(ep->context, 0x3);
	ep_ctx_write_max_packet_size(ep->context, maxpacket);
	ep_ctx_write_max_burst_size(ep->context, maxburst);

	ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
	ep_ctx_write_dcs(ep->context, ep->pcs);

	/* Select a reasonable average TRB length based on endpoint type. */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		val = 8;
		break;
	case USB_ENDPOINT_XFER_INT:
		val = 1024;
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_ISOC:
	default:
		val = 3072;
		break;
	}

	ep_ctx_write_avg_trb_len(ep->context, val);
	ep_ctx_write_max_esit_payload(ep->context, esit);

	ep_ctx_write_cerrcnt(ep->context, 0x3);
}

static void setup_link_trb(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_trb *trb)
{
	trb_write_data_ptr(trb, ep->transfer_ring_phys);
	trb_write_type(trb, TRB_TYPE_LINK);
	trb_write_toggle_cycle(trb, 1);
}

static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_err(xudc->dev, "endpoint %u already disabled\n",
			ep->index);
		return -EINVAL;
	}

	ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

	ep_reload(xudc, ep->index);

	tegra_xudc_ep_nuke(ep, -ESHUTDOWN);

	xudc->nr_enabled_eps--;
	if (usb_endpoint_xfer_isoc(ep->desc))
		xudc->nr_isoch_eps--;

	ep->desc = NULL;
	ep->comp_desc = NULL;

	memset(ep->context, 0, sizeof(*ep->context));

	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);
	if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
		xudc_writel(xudc, BIT(ep->index), EP_STOPPED);

	/*
	 * If this is the last endpoint disabled in a de-configure request,
	 * switch back to address state.
	 */
	if ((xudc->device_state == USB_STATE_CONFIGURED) &&
	    (xudc->nr_enabled_eps == 1)) {
		u32 val;

		xudc->device_state = USB_STATE_ADDRESS;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);

		val = xudc_readl(xudc, CTRL);
		val &= ~CTRL_RUN;
		xudc_writel(xudc, val, CTRL);
	}

	dev_info(xudc->dev, "ep %u disabled\n", ep->index);

	return 0;
}

static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_disable(ep);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc *xudc = ep->xudc;
	unsigned int i;
	u32 val;

	if (xudc->gadget.speed == USB_SPEED_SUPER &&
	    !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
		return -EINVAL;

	/* Disable the EP if it is not disabled */
	if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
		__tegra_xudc_ep_disable(ep);

	ep->desc = desc;
	ep->comp_desc = ep->usb_ep.comp_desc;

	if (usb_endpoint_xfer_isoc(desc)) {
		if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
			dev_err(xudc->dev, "too many isochronous endpoints\n");
			return -EBUSY;
		}
		xudc->nr_isoch_eps++;
	}

	memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
	       sizeof(*ep->transfer_ring));
	setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
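
	/*
	 * The ring was just zeroed, so every TRB carries cycle bit 0;
	 * starting with the producer cycle state at 1 means the hardware
	 * owns no TRBs until one is enqueued with its cycle bit set.
	 */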
	ep->enq_ptr = 0;
	ep->deq_ptr = 0;
	ep->pcs = true;
	ep->ring_full = false;
	xudc->nr_enabled_eps++;

	tegra_xudc_ep_context_setup(ep);

	/*
	 * No need to reload and un-halt EP0. This will be done automatically
	 * once a valid SETUP packet is received.
	 */
	if (usb_endpoint_xfer_control(desc))
		goto out;

	/*
	 * Transition to configured state once the first non-control
	 * endpoint is enabled.
	 */
	if (xudc->device_state == USB_STATE_ADDRESS) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_RUN;
		xudc_writel(xudc, val, CTRL);

		xudc->device_state = USB_STATE_CONFIGURED;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	}

	if (usb_endpoint_xfer_isoc(desc)) {
		/*
		 * Pause all bulk endpoints when enabling an isoch endpoint
		 * to ensure the isoch endpoint is allocated enough bandwidth.
		 */
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_pause(xudc, i);
		}
	}

	ep_reload(xudc, ep->index);
	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);

	if (usb_endpoint_xfer_isoc(desc)) {
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_unpause(xudc, i);
		}
	}

out:
	dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
		 usb_ep_type_string(usb_endpoint_type(ep->desc)),
		 usb_endpoint_dir_in(ep->desc) ? "in" : "out");

	return 0;
}

static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
				const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_enable(ep, desc);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static struct usb_request *
tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
{
	struct tegra_xudc_request *req;

	req = kzalloc(sizeof(*req), gfp);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->list);

	return &req->usb_req;
}

static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
				       struct usb_request *usb_req)
{
	struct tegra_xudc_request *req = to_xudc_req(usb_req);

	kfree(req);
}

static const struct usb_ep_ops tegra_xudc_ep_ops = {
	.enable = tegra_xudc_ep_enable,
	.disable = tegra_xudc_ep_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};
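
/*
 * EP0 is owned by the driver itself; gadget drivers cannot enable or
 * disable it, so these handlers simply report the endpoint as busy.
 */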
static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	return -EBUSY;
}

static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
{
	return -EBUSY;
}

static const struct usb_ep_ops tegra_xudc_ep0_ops = {
	.enable = tegra_xudc_ep0_enable,
	.disable = tegra_xudc_ep0_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};

static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
		MFINDEX_FRAME_SHIFT;
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	ep_unpause_all(xudc);

	/* Direct link to U0. */
	val = xudc_readl(xudc, PORTSC);
	if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	if (xudc->device_state == USB_STATE_SUSPENDED) {
		xudc->device_state = xudc->resume_state;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
		xudc->resume_state = 0;
	}

	/*
	 * Doorbells may be dropped if they are sent too soon (< ~200ns)
	 * after unpausing the endpoint. Wait for 500ns just to be safe.
	 */
	ndelay(500);
	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
}

static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret = 0;
	u32 val;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	val = xudc_readl(xudc, PORTPM);
	dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
		val, gadget->speed);

	if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
	     (val & PORTPM_RWE)) ||
	    ((xudc->gadget.speed == USB_SPEED_SUPER) &&
	     (val & PORTPM_FRWE))) {
		tegra_xudc_resume_device_state(xudc);

		/* Send Device Notification packet. */
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
			      | DEVNOTIF_LO_TRIG;
			xudc_writel(xudc, 0, DEVNOTIF_HI);
			xudc_writel(xudc, val, DEVNOTIF_LO);
		}
	}

unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (is_on != xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		if (is_on)
			val |= CTRL_ENABLE;
		else
			val &= ~CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	xudc->pullup = is_on;
	dev_dbg(xudc->dev, "%s: pullup:%d", __func__, is_on);

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}

static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
				   struct usb_gadget_driver *driver)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	int ret;
	unsigned int i;

	if (!driver)
		return -EINVAL;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->driver) {
		ret = -EBUSY;
		goto unlock;
	}

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
	if (ret < 0)
		goto unlock;

	val = xudc_readl(xudc, CTRL);
	val |= CTRL_IE | CTRL_LSE;
	xudc_writel(xudc, val, CTRL);

	val = xudc_readl(xudc, PORTHALT);
	val |= PORTHALT_STCHG_INTR_EN;
	xudc_writel(xudc, val, PORTHALT);

	if (xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, gadget);

	xudc->driver = driver;
unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return ret;
}

static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, NULL);

	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_IE | CTRL_ENABLE);
	xudc_writel(xudc, val, CTRL);

	__tegra_xudc_ep_disable(&xudc->ep[0]);

	xudc->driver = NULL;
	dev_dbg(xudc->dev, "Gadget stopped");

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}

static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
				       unsigned int m_a)
{
	int ret = 0;
	struct tegra_xudc *xudc = to_xudc(gadget);

	dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);

	if (xudc->curr_usbphy && xudc->curr_usbphy->chg_type == SDP_TYPE)
		ret = usb_phy_set_power(xudc->curr_usbphy, m_a);

	return ret;
}

static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);

	dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
	xudc->selfpowered = !!is_on;

	return 0;
}

static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
	.get_frame = tegra_xudc_gadget_get_frame,
	.wakeup = tegra_xudc_gadget_wakeup,
	.pullup = tegra_xudc_gadget_pullup,
	.udc_start = tegra_xudc_gadget_start,
	.udc_stop = tegra_xudc_gadget_stop,
	.vbus_draw = tegra_xudc_gadget_vbus_draw,
	.set_selfpowered = tegra_xudc_set_selfpowered,
};

static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
{
}
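
/*
 * Queue a zero-length request on EP0 to complete the status stage of a
 * control transfer.
 */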
static int
tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
			    void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = NULL;
	xudc->ep0_req->usb_req.dma = 0;
	xudc->ep0_req->usb_req.length = 0;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}

static int
tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
			  void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = buf;
	xudc->ep0_req->usb_req.length = len;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}
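
/*
 * Advance the EP0 control state machine when the current stage's request
 * completes: a finished data stage queues a status stage in the opposite
 * direction, while a finished status stage returns to waiting for the
 * next SETUP packet.
 */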
static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
{
	switch (xudc->setup_state) {
	case DATA_STAGE_XFER:
		xudc->setup_state = STATUS_STAGE_RECV;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	case DATA_STAGE_RECV:
		xudc->setup_state = STATUS_STAGE_XFER;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	default:
		xudc->setup_state = WAIT_FOR_SETUP;
		break;
	}
}

static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&xudc->lock);
	ret = xudc->driver->setup(&xudc->gadget, ctrl);
	spin_lock(&xudc->lock);

	return ret;
}
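
/*
 * A USB2 test mode must not be entered until the status stage of the
 * SET_FEATURE request has gone out, so the test pattern is written to
 * PORT_TM from the request's completion callback.
 */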
static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct tegra_xudc *xudc = req->context;

	if (xudc->test_mode_pattern) {
		xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
		xudc->test_mode_pattern = 0;
	}
}

static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
				      struct usb_ctrlrequest *ctrl)
{
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	u32 feature = le16_to_cpu(ctrl->wValue);
	u32 index = le16_to_cpu(ctrl->wIndex);
	u32 val, ep;
	int ret;

	if (le16_to_cpu(ctrl->wLength) != 0)
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (feature) {
		case USB_DEVICE_REMOTE_WAKEUP:
			if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
			    (xudc->device_state == USB_STATE_DEFAULT))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if (set)
				val |= PORTPM_RWE;
			else
				val &= ~PORTPM_RWE;
			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_U1_ENABLE:
		case USB_DEVICE_U2_ENABLE:
			if ((xudc->device_state != USB_STATE_CONFIGURED) ||
			    (xudc->gadget.speed != USB_SPEED_SUPER))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if ((feature == USB_DEVICE_U1_ENABLE) &&
			    xudc->soc->u1_enable) {
				if (set)
					val |= PORTPM_U1E;
				else
					val &= ~PORTPM_U1E;
			}

			if ((feature == USB_DEVICE_U2_ENABLE) &&
			    xudc->soc->u2_enable) {
				if (set)
					val |= PORTPM_U2E;
				else
					val &= ~PORTPM_U2E;
			}

			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_TEST_MODE:
			if (xudc->gadget.speed != USB_SPEED_HIGH)
				return -EINVAL;

			if (!set)
				return -EINVAL;

			xudc->test_mode_pattern = index >> 8;
			break;
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_INTERFACE:
		if (xudc->device_state != USB_STATE_CONFIGURED)
			return -EINVAL;

		switch (feature) {
		case USB_INTRF_FUNC_SUSPEND:
			if (set) {
				val = xudc_readl(xudc, PORTPM);

				if (index & USB_INTRF_FUNC_SUSPEND_RW)
					val |= PORTPM_FRWE;
				else
					val &= ~PORTPM_FRWE;

				xudc_writel(xudc, val, PORTPM);
			}

			return tegra_xudc_ep0_delegate_req(xudc, ctrl);
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_ENDPOINT:
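		/*
		 * The driver's endpoint indexing is direction-interleaved:
		 * even indices are OUT endpoints and odd indices are IN, so
		 * endpoint n maps to index 2 * n + direction.
		 */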
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);

		if ((xudc->device_state == USB_STATE_DEFAULT) ||
		    ((xudc->device_state == USB_STATE_ADDRESS) &&
		     (index != 0)))
			return -EINVAL;

		ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
}

static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
				     struct usb_ctrlrequest *ctrl)
{
	struct tegra_xudc_ep_context *ep_ctx;
	u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
	u16 status = 0;

	if (!(ctrl->bRequestType & USB_DIR_IN))
		return -EINVAL;

	if ((le16_to_cpu(ctrl->wValue) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 2))
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		val = xudc_readl(xudc, PORTPM);

		if (xudc->selfpowered)
			status |= BIT(USB_DEVICE_SELF_POWERED);

		if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		    (val & PORTPM_RWE))
			status |= BIT(USB_DEVICE_REMOTE_WAKEUP);

		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			if (val & PORTPM_U1E)
				status |= BIT(USB_DEV_STAT_U1_ENABLED);
			if (val & PORTPM_U2E)
				status |= BIT(USB_DEV_STAT_U2_ENABLED);
		}
		break;
	case USB_RECIP_INTERFACE:
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			status |= USB_INTRF_STAT_FUNC_RW_CAP;

			val = xudc_readl(xudc, PORTPM);
			if (val & PORTPM_FRWE)
				status |= USB_INTRF_STAT_FUNC_RW;
		}
		break;
	case USB_RECIP_ENDPOINT:
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);
		ep_ctx = &xudc->ep_context[ep];

		if ((xudc->device_state != USB_STATE_CONFIGURED) &&
		    ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
			return -EINVAL;

		if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
			return -EINVAL;

		if (xudc_readl(xudc, EP_HALT) & BIT(ep))
			status |= BIT(USB_ENDPOINT_HALT);
		break;
	default:
		return -EINVAL;
	}

	xudc->status_buf = cpu_to_le16(status);

	return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
					 sizeof(xudc->status_buf),
					 no_op_complete);
}

static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* Nothing to do with SEL values */
}

static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
				  struct usb_ctrlrequest *ctrl)
{
	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	if (xudc->device_state == USB_STATE_DEFAULT)
		return -EINVAL;

	if ((le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wValue) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 6))
		return -EINVAL;

	return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
					 sizeof(xudc->sel_timing),
					 set_sel_complete);
}

static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* Nothing to do with isoch delay */
}

static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
					  struct usb_ctrlrequest *ctrl)
{
	u32 delay = le16_to_cpu(ctrl->wValue);

	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 0))
		return -EINVAL;

	xudc->isoch_delay = delay;

	return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
}

static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct tegra_xudc *xudc = req->context;

	if ((xudc->device_state == USB_STATE_DEFAULT) &&
	    (xudc->dev_addr != 0)) {
		xudc->device_state = USB_STATE_ADDRESS;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	} else if ((xudc->device_state == USB_STATE_ADDRESS) &&
		   (xudc->dev_addr == 0)) {
		xudc->device_state = USB_STATE_DEFAULT;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	}
}

static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
				      struct usb_ctrlrequest *ctrl)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	u32 val, addr = le16_to_cpu(ctrl->wValue);

	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 0))
		return -EINVAL;

	if (xudc->device_state == USB_STATE_CONFIGURED)
		return -EINVAL;

	dev_dbg(xudc->dev, "set address: %u\n", addr);

	xudc->dev_addr = addr;

	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_DEVADDR_MASK);
	val |= CTRL_DEVADDR(addr);
	xudc_writel(xudc, val, CTRL);

	ep_ctx_write_devaddr(ep0->context, addr);

	return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
}

static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
		ret = tegra_xudc_ep0_get_status(xudc, ctrl);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
		ret = tegra_xudc_ep0_set_address(xudc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
		ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
		ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
		ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
		/*
		 * In theory we need to clear RUN bit before status stage of
		 * deconfig request sent, but this seems to be causing problems.
		 * Clear RUN once all endpoints are disabled instead.
		 */
		fallthrough;
	default:
		ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
		break;
	}

	return ret;
}

static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
					       struct usb_ctrlrequest *ctrl,
					       u16 seq_num)
{
	int ret;

	xudc->setup_seq_num = seq_num;

	/* Ensure EP0 is unhalted. */
	ep_unhalt(xudc, 0);

	/*
	 * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
	 * are invalid. Halt EP0 until we get a valid packet.
	 */
	if (xudc->soc->invalid_seq_num &&
	    (seq_num == 0xfffe || seq_num == 0xffff)) {
		dev_warn(xudc->dev, "invalid sequence number detected\n");
		ep_halt(xudc, 0);
		return;
	}

	if (ctrl->wLength)
		xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
			DATA_STAGE_XFER : DATA_STAGE_RECV;
	else
		xudc->setup_state = STATUS_STAGE_XFER;

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
	else
		ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);

	if (ret < 0) {
		dev_warn(xudc->dev, "setup request failed: %d\n", ret);
		xudc->setup_state = WAIT_FOR_SETUP;
		ep_halt(xudc, 0);
	}
}

static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
					struct tegra_xudc_trb *event)
{
	struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
	u16 seq_num = trb_read_seq_num(event);

	if (xudc->setup_state != WAIT_FOR_SETUP) {
		/*
		 * The controller is in the process of handling another
		 * setup request. Queue subsequent requests and handle
		 * the last one once the controller reports a sequence
		 * number error.
		 */
		memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
		xudc->setup_packet.seq_num = seq_num;
		xudc->queued_setup_packet = true;
	} else {
		tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
	}
}

static struct tegra_xudc_request *
trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_request *req;

	list_for_each_entry(req, &ep->queue, list) {
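		/*
		 * Requests are placed on the ring in list order, so the
		 * first request without queued TRBs ends the search.
		 */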
		if (!req->trbs_queued)
			break;

		if (trb_in_request(ep, req, trb))
			return req;
	}

	return NULL;
}

static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
						  struct tegra_xudc_ep *ep,
						  struct tegra_xudc_trb *event)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_trb *trb;
	bool short_packet;

	short_packet = (trb_read_cmpl_code(event) ==
			TRB_CMPL_CODE_SHORT_PACKET);

	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	req = trb_to_request(ep, trb);

	/*
	 * TDs are complete on short packet or when the completed TRB is the
	 * last TRB in the TD (the CHAIN bit is unset).
	 */
	if (req && (short_packet || (!trb_read_chain(trb) &&
		(req->trbs_needed == req->trbs_queued)))) {
		struct tegra_xudc_trb *last = req->last_trb;
		unsigned int residual;
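
		/*
		 * The event's transfer length field reports the number of
		 * bytes that were *not* transferred, so the actual count is
		 * the request length minus this residual.
		 */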
		residual = trb_read_transfer_len(event);
		req->usb_req.actual = req->usb_req.length - residual;

		dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
			req->usb_req.actual, req->usb_req.length);

		tegra_xudc_req_done(ep, req, 0);

		if (ep->desc && usb_endpoint_xfer_control(ep->desc))
			tegra_xudc_ep0_req_done(xudc);

		/*
		 * Advance the dequeue pointer past the end of the current TD
		 * on short packet completion.
		 */
		if (short_packet) {
			ep->deq_ptr = (last - ep->transfer_ring) + 1;
			if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
				ep->deq_ptr = 0;
		}
	} else if (!req) {
		dev_warn(xudc->dev, "transfer event on dequeued request\n");
	}

	if (ep->desc)
		tegra_xudc_ep_kick_queue(ep);
}

static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
					     struct tegra_xudc_trb *event)
{
	unsigned int ep_index = trb_read_endpoint_id(event);
	struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
	struct tegra_xudc_trb *trb;
	u16 comp_code;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
			 ep_index);
		return;
	}

	/* Update transfer ring dequeue pointer. */
	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	comp_code = trb_read_cmpl_code(event);
	if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
		ep->deq_ptr = (trb - ep->transfer_ring) + 1;

		if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
			ep->deq_ptr = 0;
		ep->ring_full = false;
	}

	switch (comp_code) {
	case TRB_CMPL_CODE_SUCCESS:
	case TRB_CMPL_CODE_SHORT_PACKET:
		tegra_xudc_handle_transfer_completion(xudc, ep, event);
		break;
	case TRB_CMPL_CODE_HOST_REJECTED:
		dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);

		ep->stream_rejected = true;
		break;
	case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
		dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);

		if (ep->stream_rejected) {
			ep->stream_rejected = false;
			/*
			 * An EP is stopped when a stream is rejected. Wait
			 * for the EP to report that it is stopped and then
			 * un-stop it.
			 */
			ep_wait_for_stopped(xudc, ep_index);
		}
		tegra_xudc_ep_ring_doorbell(ep);
		break;
	case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
		/*
		 * Wait for the EP to be stopped so the controller stops
		 * processing doorbells.
		 */
		ep_wait_for_stopped(xudc, ep_index);
		ep->enq_ptr = ep->deq_ptr;
		tegra_xudc_ep_nuke(ep, -EIO);
		fallthrough;
	case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
	case TRB_CMPL_CODE_CTRL_DIR_ERR:
	case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
	case TRB_CMPL_CODE_RING_UNDERRUN:
	case TRB_CMPL_CODE_RING_OVERRUN:
	case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
	case TRB_CMPL_CODE_USB_TRANS_ERR:
	case TRB_CMPL_CODE_TRB_ERR:
		dev_err(xudc->dev, "completion error %#x on EP %u\n",
			comp_code, ep_index);

		ep_halt(xudc, ep_index);
		break;
	case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
		dev_info(xudc->dev, "sequence number error\n");

		/*
		 * Kill any queued control request and skip to the last
		 * setup packet we received.
		 */
		tegra_xudc_ep_nuke(ep, -EINVAL);
		xudc->setup_state = WAIT_FOR_SETUP;
		if (!xudc->queued_setup_packet)
			break;

		tegra_xudc_handle_ep0_setup_packet(xudc,
						   &xudc->setup_packet.ctrl_req,
						   xudc->setup_packet.seq_num);
		xudc->queued_setup_packet = false;
		break;
	case TRB_CMPL_CODE_STOPPED:
		dev_dbg(xudc->dev, "stop completion code on EP %u\n",
			ep_index);

		/* Disconnected. */
		tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
		break;
	default:
		dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
			comp_code, ep_index);
		break;
	}
}

static void tegra_xudc_reset(struct tegra_xudc *xudc)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	dma_addr_t deq_ptr;
	unsigned int i;

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ep_unpause_all(xudc);

	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);

	/*
	 * Reset sequence number and dequeue pointer to flush the transfer
	 * ring.
	 */
	ep0->deq_ptr = ep0->enq_ptr;
	ep0->ring_full = false;

	xudc->setup_seq_num = 0;
	xudc->queued_setup_packet = false;

	ep_ctx_write_rsvd(ep0->context, 0);
	ep_ctx_write_partial_td(ep0->context, 0);
	ep_ctx_write_splitxstate(ep0->context, 0);
	ep_ctx_write_seq_num(ep0->context, 0);

	deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);

	if (!dma_mapping_error(xudc->dev, deq_ptr)) {
		ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
		ep_ctx_write_dcs(ep0->context, ep0->pcs);
	}

	ep_unhalt_all(xudc);
	ep_reload(xudc, 0);
	ep_unpause(xudc, 0);
}

static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	u16 maxpacket;
	u32 val;

	val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
	switch (val) {
	case PORTSC_PS_LS:
		xudc->gadget.speed = USB_SPEED_LOW;
		break;
	case PORTSC_PS_FS:
		xudc->gadget.speed = USB_SPEED_FULL;
		break;
	case PORTSC_PS_HS:
		xudc->gadget.speed = USB_SPEED_HIGH;
		break;
	case PORTSC_PS_SS:
		xudc->gadget.speed = USB_SPEED_SUPER;
		break;
	default:
		xudc->gadget.speed = USB_SPEED_UNKNOWN;
		break;
	}

	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	xudc->setup_state = WAIT_FOR_SETUP;
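
	/*
	 * EP0's max packet size is fixed by the negotiated speed: 512 bytes
	 * for SuperSpeed, 64 bytes otherwise.
	 */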
	if (xudc->gadget.speed == USB_SPEED_SUPER)
		maxpacket = 512;
	else
		maxpacket = 64;

	ep_ctx_write_max_packet_size(ep0->context, maxpacket);
	tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
	usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);

	if (!xudc->soc->u1_enable) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_U1TIMEOUT_MASK);
		xudc_writel(xudc, val, PORTPM);
	}

	if (!xudc->soc->u2_enable) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_U2TIMEOUT_MASK);
		xudc_writel(xudc, val, PORTPM);
	}

	if (xudc->gadget.speed <= USB_SPEED_HIGH) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_L1S_MASK);
		if (xudc->soc->lpm_enable)
			val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
		else
			val |= PORTPM_L1S(PORTPM_L1S_NYET);
		xudc_writel(xudc, val, PORTPM);
	}

	val = xudc_readl(xudc, ST);
	if (val & ST_RC)
		xudc_writel(xudc, ST_RC, ST);
}

static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
{
	tegra_xudc_reset(xudc);

	if (xudc->driver && xudc->driver->disconnect) {
		spin_unlock(&xudc->lock);
		xudc->driver->disconnect(&xudc->gadget);
		spin_lock(&xudc->lock);
	}

	xudc->device_state = USB_STATE_NOTATTACHED;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	complete(&xudc->disconnect_complete);
}

static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
{
	tegra_xudc_reset(xudc);

	if (xudc->driver) {
		spin_unlock(&xudc->lock);
		usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
		spin_lock(&xudc->lock);
	}

	tegra_xudc_port_connect(xudc);
}

static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
{
	dev_dbg(xudc->dev, "port suspend\n");

	xudc->resume_state = xudc->device_state;
	xudc->device_state = USB_STATE_SUSPENDED;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	if (xudc->driver->suspend) {
		spin_unlock(&xudc->lock);
		xudc->driver->suspend(&xudc->gadget);
		spin_lock(&xudc->lock);
	}
}

static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
{
	dev_dbg(xudc->dev, "port resume\n");

	tegra_xudc_resume_device_state(xudc);

	if (xudc->driver->resume) {
		spin_unlock(&xudc->lock);
		xudc->driver->resume(&xudc->gadget);
		spin_lock(&xudc->lock);
	}
}

static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
{
	u32 val;

	val = xudc_readl(xudc, PORTSC);
	val &= ~PORTSC_CHANGE_MASK;
	val |= flag;
	xudc_writel(xudc, val, PORTSC);
}

static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
	u32 portsc, porthalt;

	porthalt = xudc_readl(xudc, PORTHALT);
	if ((porthalt & PORTHALT_STCHG_REQ) &&
	    (porthalt & PORTHALT_HALT_LTSSM)) {
		dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
		porthalt &= ~PORTHALT_HALT_LTSSM;
		xudc_writel(xudc, porthalt, PORTHALT);
	}

	portsc = xudc_readl(xudc, PORTSC);
	if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
		dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
#define TOGGLE_VBUS_WAIT_MS 100
		if (xudc->soc->port_reset_quirk) {
			schedule_delayed_work(&xudc->port_reset_war_work,
				msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
			xudc->wait_for_sec_prc = 1;
		}
	}

	if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
		dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
		tegra_xudc_port_reset(xudc);
		cancel_delayed_work(&xudc->port_reset_war_work);
		xudc->wait_for_sec_prc = 0;
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_WRC) {
		dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
		if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
			tegra_xudc_port_reset(xudc);
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_CSC) {
		dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_CSC);

		if (portsc & PORTSC_CCS)
			tegra_xudc_port_connect(xudc);
		else
			tegra_xudc_port_disconnect(xudc);

		if (xudc->wait_csc) {
			cancel_delayed_work(&xudc->plc_reset_work);
			xudc->wait_csc = false;
		}
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_PLC) {
		u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;

		dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PLC);
		switch (pls) {
		case PORTSC_PLS_U3:
			tegra_xudc_port_suspend(xudc);
			break;
		case PORTSC_PLS_U0:
			if (xudc->gadget.speed < USB_SPEED_SUPER)
				tegra_xudc_port_resume(xudc);
			break;
		case PORTSC_PLS_RESUME:
			if (xudc->gadget.speed == USB_SPEED_SUPER)
				tegra_xudc_port_resume(xudc);
			break;
		case PORTSC_PLS_INACTIVE:
			schedule_delayed_work(&xudc->plc_reset_work,
				msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
			xudc->wait_csc = true;
			break;
		default:
			break;
		}
	}

	if (portsc & PORTSC_CEC) {
		dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_CEC);
	}

	dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
}

static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
	while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
	       (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
		__tegra_xudc_handle_port_status(xudc);
}

static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
				    struct tegra_xudc_trb *event)
{
	u32 type = trb_read_type(event);

	dump_trb(xudc, "EVENT", event);

	switch (type) {
	case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
		tegra_xudc_handle_port_status(xudc);
		break;
	case TRB_TYPE_TRANSFER_EVENT:
		tegra_xudc_handle_transfer_event(xudc, event);
		break;
	case TRB_TYPE_SETUP_PACKET_EVENT:
		tegra_xudc_handle_ep0_event(xudc, event);
		break;
	default:
		dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
		break;
	}
}

static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
{
	struct tegra_xudc_trb *event;
	dma_addr_t erdp;

	while (true) {
		event = xudc->event_ring[xudc->event_ring_index] +
			xudc->event_ring_deq_ptr;

		if (trb_read_cycle(event) != xudc->ccs)
			break;

		tegra_xudc_handle_event(xudc, event);

		xudc->event_ring_deq_ptr++;
		if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
			xudc->event_ring_deq_ptr = 0;
			xudc->event_ring_index++;
		}

		if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
			xudc->event_ring_index = 0;
			xudc->ccs = !xudc->ccs;
		}
	}
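
	/*
	 * Publish the new dequeue position and clear the event handler
	 * busy (EHB) flag so the controller can signal further events.
	 */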
	erdp = xudc->event_ring_phys[xudc->event_ring_index] +
		xudc->event_ring_deq_ptr * sizeof(*event);

	xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
	xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
}

static irqreturn_t tegra_xudc_irq(int irq, void *data)
{
	struct tegra_xudc *xudc = data;
	unsigned long flags;
	u32 val;

	val = xudc_readl(xudc, ST);
	if (!(val & ST_IP))
		return IRQ_NONE;
	xudc_writel(xudc, ST_IP, ST);

	spin_lock_irqsave(&xudc->lock, flags);
	tegra_xudc_process_event_ring(xudc);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return IRQ_HANDLED;
}

static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
{
	struct tegra_xudc_ep *ep = &xudc->ep[index];

	ep->xudc = xudc;
	ep->index = index;
	ep->context = &xudc->ep_context[index];
	INIT_LIST_HEAD(&ep->queue);

	/*
	 * EP1 would be the input endpoint corresponding to EP0, but since
	 * EP0 is bi-directional, EP1 is unused.
	 */
	if (index == 1)
		return 0;

	ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
					   GFP_KERNEL,
					   &ep->transfer_ring_phys);
	if (!ep->transfer_ring)
		return -ENOMEM;

	if (index) {
		snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
			 (index % 2 == 0) ? "out" : "in");
		ep->usb_ep.name = ep->name;
		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.max_streams = 16;
		ep->usb_ep.ops = &tegra_xudc_ep_ops;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		if (index & 1)
			ep->usb_ep.caps.dir_in = true;
		else
			ep->usb_ep.caps.dir_out = true;
		list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
	} else {
		/* Pass the full buffer size so the name is not truncated. */
		strscpy(ep->name, "ep0", sizeof(ep->name));
		ep->usb_ep.name = ep->name;
		usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
		ep->usb_ep.ops = &tegra_xudc_ep0_ops;
		ep->usb_ep.caps.type_control = true;
		ep->usb_ep.caps.dir_in = true;
		ep->usb_ep.caps.dir_out = true;
	}

	return 0;
}

static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
{
	struct tegra_xudc_ep *ep = &xudc->ep[index];

	/*
	 * EP1 would be the input endpoint corresponding to EP0, but since
	 * EP0 is bi-directional, EP1 is unused.
	 */
	if (index == 1)
		return;

	dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
		      ep->transfer_ring_phys);
}

static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
{
	struct usb_request *req;
	unsigned int i;
	int err;

	xudc->ep_context =
		dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
				   sizeof(*xudc->ep_context),
				   &xudc->ep_context_phys, GFP_KERNEL);
	if (!xudc->ep_context)
		return -ENOMEM;

	xudc->transfer_ring_pool =
		dmam_pool_create(dev_name(xudc->dev), xudc->dev,
				 XUDC_TRANSFER_RING_SIZE *
				 sizeof(struct tegra_xudc_trb),
				 sizeof(struct tegra_xudc_trb), 0);
	if (!xudc->transfer_ring_pool) {
		err = -ENOMEM;
		goto free_ep_context;
	}

	INIT_LIST_HEAD(&xudc->gadget.ep_list);

	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
		err = tegra_xudc_alloc_ep(xudc, i);
		if (err < 0)
			goto free_eps;
	}

	req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_eps;
	}
	xudc->ep0_req = to_xudc_req(req);

	return 0;

free_eps:
	for (; i > 0; i--)
		tegra_xudc_free_ep(xudc, i - 1);
free_ep_context:
	dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
			  xudc->ep_context, xudc->ep_context_phys);

	return err;
}

static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
{
	xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
	xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
}

static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
{
	unsigned int i;

	tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
				   &xudc->ep0_req->usb_req);

	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_free_ep(xudc, i);

	dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
			  xudc->ep_context, xudc->ep_context_phys);
}

static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		xudc->event_ring[i] =
			dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
					   sizeof(*xudc->event_ring[i]),
					   &xudc->event_ring_phys[i],
					   GFP_KERNEL);
		if (!xudc->event_ring[i])
			goto free_dma;
	}

	return 0;

free_dma:
	for (; i > 0; i--) {
		dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
				  sizeof(*xudc->event_ring[i - 1]),
				  xudc->event_ring[i - 1],
				  xudc->event_ring_phys[i - 1]);
	}

	return -ENOMEM;
}
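
/*
 * Program the event ring state: each segment's size and base address go
 * into ERSTSZ/ERSTXBALO/ERSTXBAHI, and the dequeue (ERDP) and enqueue
 * (EREP) pointers both start at segment 0, with the enqueue cycle state
 * bit (EREPLO_ECS) set to match the ccs = true tracked in software.
 */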
static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
		       sizeof(*xudc->event_ring[i]));

		val = xudc_readl(xudc, ERSTSZ);
		val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
		val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
		xudc_writel(xudc, val, ERSTSZ);

		xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
			    ERSTXBALO(i));
		xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
			    ERSTXBAHI(i));
	}

	val = lower_32_bits(xudc->event_ring_phys[0]);
	xudc_writel(xudc, val, ERDPLO);
	val |= EREPLO_ECS;
	xudc_writel(xudc, val, EREPLO);

	val = upper_32_bits(xudc->event_ring_phys[0]);
	xudc_writel(xudc, val, ERDPHI);
	xudc_writel(xudc, val, EREPHI);

	xudc->ccs = true;
	xudc->event_ring_index = 0;
	xudc->event_ring_deq_ptr = 0;
}

static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
				  sizeof(*xudc->event_ring[i]),
				  xudc->event_ring[i],
				  xudc->event_ring_phys[i]);
	}
}
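
/*
 * Bring up the FPCI/IPFS wrapper around the controller: enable the FPCI
 * bridge (on SoCs that have IPFS), turn on bus mastering, point BAR0 at
 * the controller's MMIO aperture, and unmask the interrupt line.
 */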
static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
{
	u32 val;

	if (xudc->soc->has_ipfs) {
		val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
		val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
		ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
		usleep_range(10, 15);
	}

	/* Enable bus master */
	val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
	      XUSB_DEV_CFG_1_BUS_MASTER_EN;
	fpci_writel(xudc, val, XUSB_DEV_CFG_1);

	/* Program BAR0 space */
	val = fpci_readl(xudc, XUSB_DEV_CFG_4);
	val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
	val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);

	fpci_writel(xudc, val, XUSB_DEV_CFG_4);
	fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);

	usleep_range(100, 200);

	if (xudc->soc->has_ipfs) {
		/* Enable interrupt assertion */
		val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
		val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
		ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
	}
}
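
/*
 * One-time programming of link timing parameters and quirk workarounds.
 * The hard-coded values below appear to be vendor-tuned: they adjust
 * LFPS burst lengths, U3 exit and tPortConfiguration timeouts, L1
 * resume timing and interrupt moderation, and park both port instances
 * in RxDetect before restoring the port register selector.
 */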
static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
{
	u32 val, imod;

	if (xudc->soc->has_ipfs) {
		val = xudc_readl(xudc, BLCG);
		val |= BLCG_ALL;
		val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
				BLCG_COREPLL_PWRDN);
		val |= BLCG_IOPLL_0_PWRDN;
		val |= BLCG_IOPLL_1_PWRDN;
		val |= BLCG_IOPLL_2_PWRDN;

		xudc_writel(xudc, val, BLCG);
	}

	if (xudc->soc->port_speed_quirk)
		tegra_xudc_limit_port_speed(xudc);

	/* Set a reasonable U3 exit timer value. */
	val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
	val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
	val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
	xudc_writel(xudc, val, SSPX_CORE_PADCTL4);

	/* Default ping LFPS tBurst is too large. */
	val = xudc_readl(xudc, SSPX_CORE_CNT0);
	val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
	val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
	xudc_writel(xudc, val, SSPX_CORE_CNT0);

	/* Default tPortConfiguration timeout is too small. */
	val = xudc_readl(xudc, SSPX_CORE_CNT30);
	val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
	val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
	xudc_writel(xudc, val, SSPX_CORE_CNT30);

	if (xudc->soc->lpm_enable) {
		/* Set L1 resume duration to 95 us. */
		val = xudc_readl(xudc, HSFSPI_COUNT13);
		val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
		val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
		xudc_writel(xudc, val, HSFSPI_COUNT13);
	}

	/*
	 * Compliance suite appears to be violating polling LFPS tBurst max
	 * of 1.4us. Send 1.45us instead.
	 */
	val = xudc_readl(xudc, SSPX_CORE_CNT32);
	val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
	val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
	xudc_writel(xudc, val, SSPX_CORE_CNT32);

	/* Direct HS/FS port instance to RxDetect. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
	xudc_writel(xudc, val, CFG_DEV_FE);

	val = xudc_readl(xudc, PORTSC);
	val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
	val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
	xudc_writel(xudc, val, PORTSC);

	/* Direct SS port instance to RxDetect. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_SS_PI);
	xudc_writel(xudc, val, CFG_DEV_FE);

	val = xudc_readl(xudc, PORTSC);
	val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
	val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
	xudc_writel(xudc, val, PORTSC);

	/* Restore port instance. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	xudc_writel(xudc, val, CFG_DEV_FE);

	/*
	 * Enable INFINITE_SS_RETRY to prevent device from entering
	 * Disabled.Error when attached to buggy SuperSpeed hubs.
	 */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val |= CFG_DEV_FE_INFINITE_SS_RETRY;
	xudc_writel(xudc, val, CFG_DEV_FE);

	/* Set interrupt moderation. */
	imod = XUDC_INTERRUPT_MODERATION_US * 4;
	val = xudc_readl(xudc, RT_IMOD);
	val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
	val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
	xudc_writel(xudc, val, RT_IMOD);

	/* Increase SSPI transaction timeout from 32us to 512us. */
	val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
	val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
	val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
	xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
}
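
/*
 * Look up the optional PHYs for each port. A port with no UTMI PHY is
 * not assigned to device mode, so its USB3 companion lookup is skipped;
 * when a UTMI PHY exists, the corresponding usb-phy is fetched and the
 * driver's VBUS notifier is registered with it.
 */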
static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
	int err = 0, usb3;
	unsigned int i;

	xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				      sizeof(*xudc->utmi_phy), GFP_KERNEL);
	if (!xudc->utmi_phy)
		return -ENOMEM;

	xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				      sizeof(*xudc->usb3_phy), GFP_KERNEL);
	if (!xudc->usb3_phy)
		return -ENOMEM;

	xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				    sizeof(*xudc->usbphy), GFP_KERNEL);
	if (!xudc->usbphy)
		return -ENOMEM;

	xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		char phy_name[] = "usb.-.";

		/* Get USB2 PHY */
		snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
		xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
		if (IS_ERR(xudc->utmi_phy[i])) {
			err = PTR_ERR(xudc->utmi_phy[i]);
			dev_err_probe(xudc->dev, err,
				      "failed to get usb2-%d PHY\n", i);
			goto clean_up;
		} else if (xudc->utmi_phy[i]) {
			/* Get usb-phy, if UTMI PHY is available */
			xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
						xudc->utmi_phy[i]->dev.of_node,
						&xudc->vbus_nb);
			if (IS_ERR(xudc->usbphy[i])) {
				err = PTR_ERR(xudc->usbphy[i]);
				dev_err_probe(xudc->dev, err,
					      "failed to get usbphy-%d\n", i);
				goto clean_up;
			}
		} else if (!xudc->utmi_phy[i]) {
			/* If UTMI PHY is not available, skip USB3 PHY get */
			continue;
		}

		/* Get USB3 PHY */
		usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
		if (usb3 < 0)
			continue;

		snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
		xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
		if (IS_ERR(xudc->usb3_phy[i])) {
			err = PTR_ERR(xudc->usb3_phy[i]);
			dev_err_probe(xudc->dev, err,
				      "failed to get usb3-%d PHY\n", usb3);
			goto clean_up;
		} else if (xudc->usb3_phy[i])
			dev_dbg(xudc->dev, "usb3-%d PHY registered\n", usb3);
	}

	return err;

clean_up:
	for (i = 0; i < xudc->soc->num_phys; i++) {
		xudc->usb3_phy[i] = NULL;
		xudc->utmi_phy[i] = NULL;
		xudc->usbphy[i] = NULL;
	}

	return err;
}

static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		phy_exit(xudc->usb3_phy[i]);
		phy_exit(xudc->utmi_phy[i]);
	}
}

static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
{
	int err;
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		err = phy_init(xudc->utmi_phy[i]);
		if (err < 0) {
			dev_err(xudc->dev, "UTMI PHY #%u initialization failed: %d\n", i, err);
			goto exit_phy;
		}

		err = phy_init(xudc->usb3_phy[i]);
		if (err < 0) {
			dev_err(xudc->dev, "USB3 PHY #%u initialization failed: %d\n", i, err);
			goto exit_phy;
		}
	}

	return 0;

exit_phy:
	tegra_xudc_phy_exit(xudc);
	return err;
}

static const char * const tegra210_xudc_supply_names[] = {
	"hvdd-usb",
	"avddio-usb",
};

static const char * const tegra210_xudc_clock_names[] = {
	"dev",
	"ss",
	"ss_src",
	"hs_src",
	"fs_src",
};

static const char * const tegra186_xudc_clock_names[] = {
	"dev",
	"ss",
	"ss_src",
	"fs_src",
};

static struct tegra_xudc_soc tegra210_xudc_soc_data = {
	.supply_names = tegra210_xudc_supply_names,
	.num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
	.clock_names = tegra210_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = false,
	.u2_enable = true,
	.lpm_enable = false,
	.invalid_seq_num = true,
	.pls_quirk = true,
	.port_reset_quirk = true,
	.port_speed_quirk = false,
	.has_ipfs = true,
};

static struct tegra_xudc_soc tegra186_xudc_soc_data = {
	.clock_names = tegra186_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = true,
	.u2_enable = true,
	.lpm_enable = false,
	.invalid_seq_num = false,
	.pls_quirk = false,
	.port_reset_quirk = false,
	.port_speed_quirk = false,
	.has_ipfs = false,
};

static struct tegra_xudc_soc tegra194_xudc_soc_data = {
	.clock_names = tegra186_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = true,
	.u2_enable = true,
	.lpm_enable = true,
	.invalid_seq_num = false,
	.pls_quirk = false,
	.port_reset_quirk = false,
	.port_speed_quirk = true,
	.has_ipfs = false,
};

static const struct of_device_id tegra_xudc_of_match[] = {
	{
		.compatible = "nvidia,tegra210-xudc",
		.data = &tegra210_xudc_soc_data
	},
	{
		.compatible = "nvidia,tegra186-xudc",
		.data = &tegra186_xudc_soc_data
	},
	{
		.compatible = "nvidia,tegra194-xudc",
		.data = &tegra194_xudc_soc_data
	},
	{ }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);

static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
{
	if (xudc->genpd_dl_ss)
		device_link_del(xudc->genpd_dl_ss);

	if (xudc->genpd_dl_device)
		device_link_del(xudc->genpd_dl_device);

	if (xudc->genpd_dev_ss)
		dev_pm_domain_detach(xudc->genpd_dev_ss, true);

	if (xudc->genpd_dev_device)
		dev_pm_domain_detach(xudc->genpd_dev_device, true);
}
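
/*
 * Attach the "dev" and "ss" power domains and tie them to this device
 * with stateless PM-runtime device links. On failure, any partially
 * attached state is left for the caller to unwind via
 * tegra_xudc_powerdomain_remove(), which is what probe's
 * put_powerdomains label does.
 */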
static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
{
	struct device *dev = xudc->dev;
	int err;

	xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
	if (IS_ERR(xudc->genpd_dev_device)) {
		err = PTR_ERR(xudc->genpd_dev_device);
		dev_err(dev, "failed to get device power domain: %d\n", err);
		return err;
	}

	xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
	if (IS_ERR(xudc->genpd_dev_ss)) {
		err = PTR_ERR(xudc->genpd_dev_ss);
		dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
		return err;
	}

	xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
						DL_FLAG_PM_RUNTIME |
						DL_FLAG_STATELESS);
	if (!xudc->genpd_dl_device) {
		dev_err(dev, "failed to add USB device link\n");
		return -ENODEV;
	}

	xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
					    DL_FLAG_PM_RUNTIME |
					    DL_FLAG_STATELESS);
	if (!xudc->genpd_dl_ss) {
		dev_err(dev, "failed to add SuperSpeed device link\n");
		return -ENODEV;
	}

	return 0;
}
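
/*
 * Probe acquires resources in dependency order (MMIO, IRQ, clocks,
 * regulators, padctl, PHYs, power domains) before allocating the event
 * ring and endpoints and registering the UDC; the error labels unwind
 * in reverse order.
 */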
static int tegra_xudc_probe(struct platform_device *pdev)
{
	struct tegra_xudc *xudc;
	struct resource *res;
	unsigned int i;
	int err;

	xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
	if (!xudc)
		return -ENOMEM;

	xudc->dev = &pdev->dev;
	platform_set_drvdata(pdev, xudc);

	xudc->soc = of_device_get_match_data(&pdev->dev);
	if (!xudc->soc)
		return -ENODEV;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	xudc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xudc->base))
		return PTR_ERR(xudc->base);
	xudc->phys_base = res->start;

	xudc->fpci = devm_platform_ioremap_resource_byname(pdev, "fpci");
	if (IS_ERR(xudc->fpci))
		return PTR_ERR(xudc->fpci);

	if (xudc->soc->has_ipfs) {
		xudc->ipfs = devm_platform_ioremap_resource_byname(pdev, "ipfs");
		if (IS_ERR(xudc->ipfs))
			return PTR_ERR(xudc->ipfs);
	}

	xudc->irq = platform_get_irq(pdev, 0);
	if (xudc->irq < 0)
		return xudc->irq;

	err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
			       dev_name(&pdev->dev), xudc);
	if (err < 0) {
		dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
			err);
		return err;
	}

	xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks,
				  sizeof(*xudc->clks), GFP_KERNEL);
	if (!xudc->clks)
		return -ENOMEM;

	for (i = 0; i < xudc->soc->num_clks; i++)
		xudc->clks[i].id = xudc->soc->clock_names[i];

	err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
	if (err) {
		dev_err_probe(xudc->dev, err, "failed to request clocks\n");
		return err;
	}

	xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
				      sizeof(*xudc->supplies), GFP_KERNEL);
	if (!xudc->supplies)
		return -ENOMEM;

	for (i = 0; i < xudc->soc->num_supplies; i++)
		xudc->supplies[i].supply = xudc->soc->supply_names[i];

	err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
				      xudc->supplies);
	if (err) {
		dev_err_probe(xudc->dev, err, "failed to request regulators\n");
		return err;
	}

	xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
	if (IS_ERR(xudc->padctl))
		return PTR_ERR(xudc->padctl);

	err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
	if (err) {
		dev_err(xudc->dev, "failed to enable regulators: %d\n", err);
		goto put_padctl;
	}

	err = tegra_xudc_phy_get(xudc);
	if (err)
		goto disable_regulator;

	err = tegra_xudc_powerdomain_init(xudc);
	if (err)
		goto put_powerdomains;

	err = tegra_xudc_phy_init(xudc);
	if (err)
		goto put_powerdomains;

	err = tegra_xudc_alloc_event_ring(xudc);
	if (err)
		goto disable_phy;

	err = tegra_xudc_alloc_eps(xudc);
	if (err)
		goto free_event_ring;

	spin_lock_init(&xudc->lock);
	init_completion(&xudc->disconnect_complete);

	INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
	INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
	INIT_DELAYED_WORK(&xudc->port_reset_war_work,
			  tegra_xudc_port_reset_war_work);

	pm_runtime_enable(&pdev->dev);

	xudc->gadget.ops = &tegra_xudc_gadget_ops;
	xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
	xudc->gadget.name = "tegra-xudc";
	xudc->gadget.max_speed = USB_SPEED_SUPER;

	err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
	if (err) {
		dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
		goto free_eps;
	}

	return 0;

free_eps:
	pm_runtime_disable(&pdev->dev);
	tegra_xudc_free_eps(xudc);
free_event_ring:
	tegra_xudc_free_event_ring(xudc);
disable_phy:
	tegra_xudc_phy_exit(xudc);
put_powerdomains:
	tegra_xudc_powerdomain_remove(xudc);
disable_regulator:
	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
put_padctl:
	tegra_xusb_padctl_put(xudc->padctl);

	return err;
}

static int tegra_xudc_remove(struct platform_device *pdev)
{
	struct tegra_xudc *xudc = platform_get_drvdata(pdev);
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	cancel_delayed_work_sync(&xudc->plc_reset_work);
	cancel_work_sync(&xudc->usb_role_sw_work);

	usb_del_gadget_udc(&xudc->gadget);

	tegra_xudc_free_eps(xudc);
	tegra_xudc_free_event_ring(xudc);

	tegra_xudc_powerdomain_remove(xudc);

	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

	for (i = 0; i < xudc->soc->num_phys; i++) {
		phy_power_off(xudc->utmi_phy[i]);
		phy_power_off(xudc->usb3_phy[i]);
	}

	tegra_xudc_phy_exit(xudc);

	pm_runtime_disable(xudc->dev);
	pm_runtime_put(xudc->dev);

	tegra_xusb_padctl_put(xudc->padctl);

	return 0;
}
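
/*
 * ELPG (engine-level powergating) entry and exit. Powergating saves the
 * CTRL and PORTPM registers and stops the controller before cutting
 * clocks and supplies; unpowergating redoes the full controller init
 * (FPCI/IPFS, device params, event ring, endpoint contexts) and then
 * restores the saved registers, since register contents are presumably
 * lost while the partition is gated.
 */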
static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
{
	unsigned long flags;

	dev_dbg(xudc->dev, "entering ELPG\n");

	spin_lock_irqsave(&xudc->lock, flags);

	xudc->powergated = true;
	xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
	xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
	xudc_writel(xudc, 0, CTRL);

	spin_unlock_irqrestore(&xudc->lock, flags);

	clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);

	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

	dev_dbg(xudc->dev, "entering ELPG done\n");
	return 0;
}

static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
{
	unsigned long flags;
	int err;

	dev_dbg(xudc->dev, "exiting ELPG\n");

	err = regulator_bulk_enable(xudc->soc->num_supplies,
				    xudc->supplies);
	if (err < 0)
		return err;

	err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
	if (err < 0)
		return err;

	tegra_xudc_fpci_ipfs_init(xudc);

	tegra_xudc_device_params_init(xudc);

	tegra_xudc_init_event_ring(xudc);

	tegra_xudc_init_eps(xudc);

	xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
	xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->powergated = false;
	spin_unlock_irqrestore(&xudc->lock, flags);

	dev_dbg(xudc->dev, "exiting ELPG done\n");
	return 0;
}
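
/*
 * System sleep hooks. On suspend, the role-switch work is flushed and,
 * if the device is not already runtime-suspended, the gadget is
 * forcibly disconnected before powergating; resume unconditionally
 * unpowergates and kicks the role-switch work to re-evaluate the port.
 */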
static int __maybe_unused tegra_xudc_suspend(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->suspended = true;
	spin_unlock_irqrestore(&xudc->lock, flags);

	flush_work(&xudc->usb_role_sw_work);

	if (!pm_runtime_status_suspended(dev)) {
		/* Forcibly disconnect before powergating. */
		tegra_xudc_device_mode_off(xudc);
		tegra_xudc_powergate(xudc);
	}

	pm_runtime_disable(dev);

	return 0;
}

static int __maybe_unused tegra_xudc_resume(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;
	int err;

	err = tegra_xudc_unpowergate(xudc);
	if (err < 0)
		return err;

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->suspended = false;
	spin_unlock_irqrestore(&xudc->lock, flags);

	schedule_work(&xudc->usb_role_sw_work);

	pm_runtime_enable(dev);

	return 0;
}

static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);

	return tegra_xudc_powergate(xudc);
}

static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);

	return tegra_xudc_unpowergate(xudc);
}

static const struct dev_pm_ops tegra_xudc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
	SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
			   tegra_xudc_runtime_resume, NULL)
};

static struct platform_driver tegra_xudc_driver = {
	.probe = tegra_xudc_probe,
	.remove = tegra_xudc_remove,
	.driver = {
		.name = "tegra-xudc",
		.pm = &tegra_xudc_pm_ops,
		.of_match_table = tegra_xudc_of_match,
	},
};
module_platform_driver(tegra_xudc_driver);

MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
MODULE_AUTHOR("Andrew Bresticker <[email protected]>");
MODULE_AUTHOR("Hui Fu <[email protected]>");
MODULE_AUTHOR("Nagarjuna Kristam <[email protected]>");
MODULE_LICENSE("GPL v2");