gadget.c 122 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
  4. *
  5. * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
  6. *
  7. * Authors: Felipe Balbi <[email protected]>,
  8. * Sebastian Andrzej Siewior <[email protected]>
  9. */
  10. #include <linux/kernel.h>
  11. #include <linux/delay.h>
  12. #include <linux/slab.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/pm_runtime.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/io.h>
  18. #include <linux/list.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/usb/ch9.h>
  21. #include <linux/usb/gadget.h>
  22. #include "debug.h"
  23. #include "core.h"
  24. #include "gadget.h"
  25. #include "io.h"
  26. #define DWC3_ALIGN_FRAME(d, n) (((d)->frame_number + ((d)->interval * (n))) \
  27. & ~((d)->interval - 1))
  28. /**
  29. * dwc3_gadget_set_test_mode - enables usb2 test modes
  30. * @dwc: pointer to our context structure
  31. * @mode: the mode to set (J, K SE0 NAK, Force Enable)
  32. *
  33. * Caller should take care of locking. This function will return 0 on
  34. * success or -EINVAL if wrong Test Selector is passed.
  35. */
  36. int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
  37. {
  38. u32 reg;
  39. reg = dwc3_readl(dwc->regs, DWC3_DCTL);
  40. reg &= ~DWC3_DCTL_TSTCTRL_MASK;
  41. switch (mode) {
  42. case USB_TEST_J:
  43. case USB_TEST_K:
  44. case USB_TEST_SE0_NAK:
  45. case USB_TEST_PACKET:
  46. case USB_TEST_FORCE_ENABLE:
  47. reg |= mode << 1;
  48. break;
  49. default:
  50. return -EINVAL;
  51. }
  52. dwc3_gadget_dctl_write_safe(dwc, reg);
  53. return 0;
  54. }
  55. /**
  56. * dwc3_gadget_get_link_state - gets current state of usb link
  57. * @dwc: pointer to our context structure
  58. *
  59. * Caller should take care of locking. This function will
  60. * return the link state on success (>= 0) or -ETIMEDOUT.
  61. */
  62. int dwc3_gadget_get_link_state(struct dwc3 *dwc)
  63. {
  64. u32 reg;
  65. reg = dwc3_readl(dwc->regs, DWC3_DSTS);
  66. return DWC3_DSTS_USBLNKST(reg);
  67. }
/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
		/* Poll DSTS.DCNRD at 5us intervals (~50ms worst case). */
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set no action before sending new link state change */
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* Done once the hardware reports the requested state. */
		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}
  118. static void dwc3_ep0_reset_state(struct dwc3 *dwc)
  119. {
  120. unsigned int dir;
  121. if (dwc->ep0state != EP0_SETUP_PHASE) {
  122. dir = !!dwc->ep0_expect_in;
  123. if (dwc->ep0state == EP0_DATA_PHASE)
  124. dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
  125. else
  126. dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
  127. dwc->eps[0]->trb_enqueue = 0;
  128. dwc->eps[1]->trb_enqueue = 0;
  129. dwc3_ep0_stall_and_restart(dwc);
  130. }
  131. }
  132. /**
  133. * dwc3_ep_inc_trb - increment a trb index.
  134. * @index: Pointer to the TRB index to increment.
  135. *
  136. * The index should never point to the link TRB. After incrementing,
  137. * if it is point to the link TRB, wrap around to the beginning. The
  138. * link TRB is always at the last TRB entry.
  139. */
  140. static void dwc3_ep_inc_trb(u8 *index)
  141. {
  142. (*index)++;
  143. if (*index == (DWC3_TRB_NUM - 1))
  144. *index = 0;
  145. }
/**
 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
 * @dep: The endpoint whose enqueue pointer we're incrementing
 *
 * Wraps past the link TRB at the end of the ring (see dwc3_ep_inc_trb()).
 */
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}
/**
 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
 * @dep: The endpoint whose dequeue pointer we're incrementing
 *
 * Wraps past the link TRB at the end of the ring (see dwc3_ep_inc_trb()).
 */
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}
  162. static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
  163. struct dwc3_request *req, int status)
  164. {
  165. struct dwc3 *dwc = dep->dwc;
  166. list_del(&req->list);
  167. req->remaining = 0;
  168. req->needs_extra_trb = false;
  169. req->num_trbs = 0;
  170. if (req->request.status == -EINPROGRESS)
  171. req->request.status = status;
  172. if (req->trb)
  173. usb_gadget_unmap_request_by_dev(dwc->sysdev,
  174. &req->request, req->direction);
  175. req->trb = NULL;
  176. trace_dwc3_gadget_giveback(req);
  177. if (dep->number > 1)
  178. pm_runtime_put(dwc->dev);
  179. }
/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	dwc3_gadget_del_and_unmap_request(dep, req, status);
	req->status = DWC3_REQUEST_STATUS_COMPLETED;

	/*
	 * Drop the controller lock around the ->complete() callback; the
	 * upper layer may call back into this driver from it.
	 */
	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
  200. /**
  201. * dwc3_send_gadget_generic_command - issue a generic command for the controller
  202. * @dwc: pointer to the controller context
  203. * @cmd: the command to be issued
  204. * @param: command parameter
  205. *
  206. * Caller should take care of locking. Issue @cmd with a given @param to @dwc
  207. * and wait for its completion.
  208. */
  209. int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
  210. u32 param)
  211. {
  212. u32 timeout = 500;
  213. int status = 0;
  214. int ret = 0;
  215. u32 reg;
  216. dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
  217. dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
  218. do {
  219. reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
  220. if (!(reg & DWC3_DGCMD_CMDACT)) {
  221. status = DWC3_DGCMD_STATUS(reg);
  222. if (status)
  223. ret = -EINVAL;
  224. break;
  225. }
  226. } while (--timeout);
  227. if (!timeout) {
  228. ret = -ETIMEDOUT;
  229. status = -ETIMEDOUT;
  230. }
  231. trace_dwc3_gadget_generic_cmd(cmd, param, status);
  232. return ret;
  233. }
  234. static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with given
 * @params to @dep and wait for its completion.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;
	u32 timeout = 5000;
	u32 saved_config = 0;
	u32 reg;

	int cmd_status = 0;
	int ret = -EINVAL;

	/*
	 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
	 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
	 * endpoint command.
	 *
	 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
	 * settings. Restore them after the command is completed.
	 *
	 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
	 */
	if (dwc->gadget->speed <= USB_SPEED_HIGH ||
	    DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
			saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
		}

		if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
			saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
		}

		/* Only touch the register if something actually changed. */
		if (saved_config)
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		int link_state;

		/*
		 * Initiate remote wakeup if the link state is in U3 when
		 * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
		 * link state is in U1/U2, no remote wakeup is needed. The Start
		 * Transfer command will initiate the link recovery.
		 */
		link_state = dwc3_gadget_get_link_state(dwc);
		switch (link_state) {
		case DWC3_LINK_STATE_U2:
			if (dwc->gadget->speed >= USB_SPEED_SUPER)
				break;

			fallthrough;
		case DWC3_LINK_STATE_U3:
			ret = __dwc3_gadget_wakeup(dwc);
			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
					ret);
			break;
		}
	}

	/*
	 * For some commands such as Update Transfer command, DEPCMDPARn
	 * registers are reserved. Since the driver often sends Update Transfer
	 * command, don't write to DEPCMDPARn to avoid register write delays and
	 * improve performance.
	 */
	if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_UPDATETRANSFER) {
		dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
		dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
		dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
	}

	/*
	 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're
	 * not relying on XferNotReady, we can make use of a special "No
	 * Response Update Transfer" command where we should clear both CmdAct
	 * and CmdIOC bits.
	 *
	 * With this, we don't need to wait for command completion and can
	 * straight away issue further commands to the endpoint.
	 *
	 * NOTICE: We're making an assumption that control endpoints will never
	 * make use of Update Transfer command. This is a safe assumption
	 * because we can never have more than one request at a time with
	 * Control Endpoints. If anybody changes that assumption, this chunk
	 * needs to be updated accordingly.
	 */
	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
	    !usb_endpoint_xfer_isoc(desc))
		cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
	else
		cmd |= DWC3_DEPCMD_CMDACT;

	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);

	/*
	 * No-response commands (CmdAct cleared above) and End Transfer
	 * without CmdIOC complete without a status to poll for.
	 */
	if (!(cmd & DWC3_DEPCMD_CMDACT) ||
	    (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
	     !(cmd & DWC3_DEPCMD_CMDIOC))) {
		ret = 0;
		goto skip_status;
	}

	/* Busy-wait for the controller to clear CMDACT, then decode status. */
	do {
		reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			cmd_status = DWC3_DEPCMD_STATUS(reg);

			switch (cmd_status) {
			case 0:
				ret = 0;
				break;
			case DEPEVT_TRANSFER_NO_RESOURCE:
				dev_WARN(dwc->dev, "No resource for %s\n",
					 dep->name);
				ret = -EINVAL;
				break;
			case DEPEVT_TRANSFER_BUS_EXPIRY:
				/*
				 * SW issues START TRANSFER command to
				 * isochronous ep with future frame interval. If
				 * future interval time has already passed when
				 * core receives the command, it will respond
				 * with an error status of 'Bus Expiry'.
				 *
				 * Instead of always returning -EINVAL, let's
				 * give a hint to the gadget driver that this is
				 * the case by returning -EAGAIN.
				 */
				ret = -EAGAIN;
				break;
			default:
				dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
			}

			break;
		}
	} while (--timeout);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		cmd_status = -ETIMEDOUT;
	}

skip_status:
	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		if (ret == 0)
			dep->flags |= DWC3_EP_TRANSFER_STARTED;

		/*
		 * NOTE(review): the transfer resource index is read back even
		 * when the command failed (unless it timed out) — presumably
		 * the controller may still have latched one; confirm against
		 * the databook.
		 */
		if (ret != -ETIMEDOUT)
			dwc3_gadget_ep_get_transfer_index(dep);
	}

	/* Restore the GUSB2PHYCFG bits that were cleared at entry. */
	if (saved_config) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		reg |= saved_config;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	return ret;
}
  389. static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
  390. {
  391. struct dwc3 *dwc = dep->dwc;
  392. struct dwc3_gadget_ep_cmd_params params;
  393. u32 cmd = DWC3_DEPCMD_CLEARSTALL;
  394. /*
  395. * As of core revision 2.60a the recommended programming model
  396. * is to set the ClearPendIN bit when issuing a Clear Stall EP
  397. * command for IN endpoints. This is to prevent an issue where
  398. * some (non-compliant) hosts may not send ACK TPs for pending
  399. * IN transfers due to a mishandled error condition. Synopsys
  400. * STAR 9000614252.
  401. */
  402. if (dep->direction &&
  403. !DWC3_VER_IS_PRIOR(DWC3, 260A) &&
  404. (dwc->gadget->speed >= USB_SPEED_SUPER))
  405. cmd |= DWC3_DEPCMD_CLEARPENDIN;
  406. memset(&params, 0, sizeof(params));
  407. return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
  408. }
  409. static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
  410. struct dwc3_trb *trb)
  411. {
  412. u32 offset = (char *) trb - (char *) dep->trb_pool;
  413. return dep->trb_pool_dma + offset;
  414. }
  415. static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
  416. {
  417. struct dwc3 *dwc = dep->dwc;
  418. if (dep->trb_pool)
  419. return 0;
  420. dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
  421. sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
  422. &dep->trb_pool_dma, GFP_KERNEL);
  423. if (!dep->trb_pool) {
  424. dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
  425. dep->name);
  426. return -ENOMEM;
  427. }
  428. return 0;
  429. }
  430. static void dwc3_free_trb_pool(struct dwc3_ep *dep)
  431. {
  432. struct dwc3 *dwc = dep->dwc;
  433. dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
  434. dep->trb_pool, dep->trb_pool_dma);
  435. dep->trb_pool = NULL;
  436. dep->trb_pool_dma = 0;
  437. }
  438. static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
  439. {
  440. struct dwc3_gadget_ep_cmd_params params;
  441. memset(&params, 0x00, sizeof(params));
  442. params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
  443. return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
  444. &params);
  445. }
  446. /**
  447. * dwc3_gadget_start_config - configure ep resources
  448. * @dep: endpoint that is being enabled
  449. *
  450. * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
  451. * completion, it will set Transfer Resource for all available endpoints.
  452. *
  453. * The assignment of transfer resources cannot perfectly follow the data book
  454. * due to the fact that the controller driver does not have all knowledge of the
  455. * configuration in advance. It is given this information piecemeal by the
  456. * composite gadget framework after every SET_CONFIGURATION and
  457. * SET_INTERFACE. Trying to follow the databook programming model in this
  458. * scenario can cause errors. For two reasons:
  459. *
  460. * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
  461. * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
  462. * incorrect in the scenario of multiple interfaces.
  463. *
  464. * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
  465. * endpoint on alt setting (8.1.6).
  466. *
  467. * The following simplified method is used instead:
  468. *
  469. * All hardware endpoints can be assigned a transfer resource and this setting
  470. * will stay persistent until either a core reset or hibernation. So whenever we
  471. * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
  472. * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
  473. * guaranteed that there are as many transfer resources as endpoints.
  474. *
  475. * This function is called for each endpoint when it is being enabled but is
  476. * triggered only when called for EP0-out, which always happens first, and which
  477. * should only happen in one of the above conditions.
  478. */
  479. static int dwc3_gadget_start_config(struct dwc3_ep *dep)
  480. {
  481. struct dwc3_gadget_ep_cmd_params params;
  482. struct dwc3 *dwc;
  483. u32 cmd;
  484. int i;
  485. int ret;
  486. if (dep->number)
  487. return 0;
  488. memset(&params, 0x00, sizeof(params));
  489. cmd = DWC3_DEPCMD_DEPSTARTCFG;
  490. dwc = dep->dwc;
  491. ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
  492. if (ret)
  493. return ret;
  494. for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
  495. struct dwc3_ep *dep = dwc->eps[i];
  496. if (!dep)
  497. continue;
  498. ret = dwc3_gadget_set_xfer_resource(dep);
  499. if (ret)
  500. return ret;
  501. }
  502. return 0;
  503. }
/*
 * dwc3_gadget_set_ep_config - issue DEPCFG (SetEPConfig) for an endpoint
 * @dep: endpoint being configured
 * @action: DWC3_DEPCFG_ACTION_{INIT,MODIFY,RESTORE}
 *
 * Builds the DEPCFG command parameters from the endpoint descriptors and
 * sends DWC3_DEPCMD_SETEPCONFIG. Returns 0 on success or the command's
 * negative error code.
 */
static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	const struct usb_endpoint_descriptor *desc;
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;

	comp_desc = dep->endpoint.comp_desc;
	desc = dep->endpoint.desc;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget->speed >= USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst;

		/* DEPCFG encodes burst as (maxburst - 1). */
		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
	}

	params.param0 |= action;
	/* RESTORE additionally replays the state saved at hibernation. */
	if (action == DWC3_DEPCFG_ACTION_RESTORE)
		params.param2 |= dep->saved_state;

	if (usb_endpoint_xfer_control(desc))
		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;

	/* ep0/ep1 and isoc endpoints need XferNotReady events. */
	if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;

	/* Bulk endpoints with a streams companion become stream-capable. */
	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_XFER_COMPLETE_EN
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		u8 bInterval_m1;

		/*
		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
		 *
		 * NOTE: The programming guide incorrectly stated bInterval_m1
		 * must be set to 0 when operating in fullspeed. Internally the
		 * controller does not have this limitation. See DWC_usb3x
		 * programming guide section 3.2.2.1.
		 */
		bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);

		/*
		 * Fullspeed interrupt endpoints express bInterval in frames
		 * directly; everything else uses 2^(bInterval-1) microframes.
		 */
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
		    dwc->gadget->speed == USB_SPEED_FULL)
			dep->interval = desc->bInterval;
		else
			dep->interval = 1 << (desc->bInterval - 1);

		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
	}

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
  568. /**
  569. * dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
  570. * @dwc: pointer to the DWC3 context
  571. * @mult: multiplier to be used when calculating the fifo_size
  572. *
  573. * Calculates the size value based on the equation below:
  574. *
  575. * DWC3 revision 280A and prior:
  576. * fifo_size = mult * (max_packet / mdwidth) + 1;
  577. *
  578. * DWC3 revision 290A and onwards:
  579. * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
  580. *
  581. * The max packet size is set to 1024, as the txfifo requirements mainly apply
  582. * to super speed USB use cases. However, it is safe to overestimate the fifo
  583. * allocations for other scenarios, i.e. high speed USB.
  584. */
  585. static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
  586. {
  587. int max_packet = 1024;
  588. int fifo_size;
  589. int mdwidth;
  590. mdwidth = dwc3_mdwidth(dwc);
  591. /* MDWIDTH is represented in bits, we need it in bytes */
  592. mdwidth >>= 3;
  593. if (DWC3_VER_IS_PRIOR(DWC3, 290A))
  594. fifo_size = mult * (max_packet / mdwidth) + 1;
  595. else
  596. fifo_size = mult * ((max_packet + mdwidth) / mdwidth) + 1;
  597. return fifo_size;
  598. }
  599. /**
  600. * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
  601. * @dwc: pointer to the DWC3 context
  602. *
  603. * Iterates through all the endpoint registers and clears the previous txfifo
  604. * allocations.
  605. */
  606. void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
  607. {
  608. struct dwc3_ep *dep;
  609. int fifo_depth;
  610. int size;
  611. int num;
  612. if (!dwc->do_fifo_resize)
  613. return;
  614. /* Read ep0IN related TXFIFO size */
  615. dep = dwc->eps[1];
  616. size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
  617. if (DWC3_IP_IS(DWC3))
  618. fifo_depth = DWC3_GTXFIFOSIZ_TXFDEP(size);
  619. else
  620. fifo_depth = DWC31_GTXFIFOSIZ_TXFDEP(size);
  621. dwc->last_fifo_depth = fifo_depth;
  622. /* Clear existing TXFIFO for all IN eps except ep0 */
  623. for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
  624. num += 2) {
  625. dep = dwc->eps[num];
  626. /* Don't change TXFRAMNUM on usb31 version */
  627. size = DWC3_IP_IS(DWC3) ? 0 :
  628. dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
  629. DWC31_GTXFIFOSIZ_TXFRAMNUM;
  630. dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
  631. dep->flags &= ~DWC3_EP_TXFIFO_RESIZED;
  632. }
  633. dwc->num_ep_resized = 0;
  634. }
  635. /*
  636. * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
  637. * @dwc: pointer to our context structure
  638. *
  639. * This function will a best effort FIFO allocation in order
  640. * to improve FIFO usage and throughput, while still allowing
  641. * us to enable as many endpoints as possible.
  642. *
  643. * Keep in mind that this operation will be highly dependent
  644. * on the configured size for RAM1 - which contains TxFifo -,
  645. * the amount of endpoints enabled on coreConsultant tool, and
  646. * the width of the Master Bus.
  647. *
  648. * In general, FIFO depths are represented with the following equation:
  649. *
  650. * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
  651. *
  652. * In conjunction with dwc3_gadget_check_config(), this resizing logic will
  653. * ensure that all endpoints will have enough internal memory for one max
  654. * packet per endpoint.
  655. */
  656. static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
  657. {
  658. struct dwc3 *dwc = dep->dwc;
  659. int fifo_0_start;
  660. int ram1_depth;
  661. int fifo_size;
  662. int min_depth;
  663. int num_in_ep;
  664. int remaining;
  665. int num_fifos = 1;
  666. int fifo;
  667. int tmp;
  668. if (!dwc->do_fifo_resize)
  669. return 0;
  670. /* resize IN endpoints except ep0 */
  671. if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
  672. return 0;
  673. /* bail if already resized */
  674. if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
  675. return 0;
  676. ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
  677. if ((dep->endpoint.maxburst > 1 &&
  678. usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
  679. usb_endpoint_xfer_isoc(dep->endpoint.desc))
  680. num_fifos = 3;
  681. if (dep->endpoint.maxburst > 6 &&
  682. (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
  683. usb_endpoint_xfer_isoc(dep->endpoint.desc)) && DWC3_IP_IS(DWC31))
  684. num_fifos = dwc->tx_fifo_resize_max_num;
  685. /* FIFO size for a single buffer */
  686. fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);
  687. /* Calculate the number of remaining EPs w/o any FIFO */
  688. num_in_ep = dwc->max_cfg_eps;
  689. num_in_ep -= dwc->num_ep_resized;
  690. /* Reserve at least one FIFO for the number of IN EPs */
  691. min_depth = num_in_ep * (fifo + 1);
  692. remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
  693. remaining = max_t(int, 0, remaining);
  694. /*
  695. * We've already reserved 1 FIFO per EP, so check what we can fit in
  696. * addition to it. If there is not enough remaining space, allocate
  697. * all the remaining space to the EP.
  698. */
  699. fifo_size = (num_fifos - 1) * fifo;
  700. if (remaining < fifo_size)
  701. fifo_size = remaining;
  702. fifo_size += fifo;
  703. /* Last increment according to the TX FIFO size equation */
  704. fifo_size++;
  705. /* Check if TXFIFOs start at non-zero addr */
  706. tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
  707. fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);
  708. fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
  709. if (DWC3_IP_IS(DWC3))
  710. dwc->last_fifo_depth += DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
  711. else
  712. dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
  713. /* Check fifo size allocation doesn't exceed available RAM size. */
  714. if (dwc->last_fifo_depth >= ram1_depth) {
  715. dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
  716. dwc->last_fifo_depth, ram1_depth,
  717. dep->endpoint.name, fifo_size);
  718. if (DWC3_IP_IS(DWC3))
  719. fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
  720. else
  721. fifo_size = DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
  722. dwc->last_fifo_depth -= fifo_size;
  723. return -ENOMEM;
  724. }
  725. dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
  726. dep->flags |= DWC3_EP_TXFIFO_RESIZED;
  727. dwc->num_ep_resized++;
  728. return 0;
  729. }
  730. /**
  731. * __dwc3_gadget_ep_enable - initializes a hw endpoint
  732. * @dep: endpoint to be initialized
  733. * @action: one of INIT, MODIFY or RESTORE
  734. *
  735. * Caller should take care of locking. Execute all necessary commands to
  736. * initialize a HW endpoint so it can be used by a gadget driver.
  737. */
  738. static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
  739. {
  740. const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
  741. struct dwc3 *dwc = dep->dwc;
  742. u32 reg;
  743. int ret;
  744. if (!(dep->flags & DWC3_EP_ENABLED)) {
  745. ret = dwc3_gadget_resize_tx_fifos(dep);
  746. if (ret)
  747. return ret;
  748. ret = dwc3_gadget_start_config(dep);
  749. if (ret)
  750. return ret;
  751. }
  752. ret = dwc3_gadget_set_ep_config(dep, action);
  753. if (ret)
  754. return ret;
  755. if (!(dep->flags & DWC3_EP_ENABLED)) {
  756. struct dwc3_trb *trb_st_hw;
  757. struct dwc3_trb *trb_link;
  758. dep->type = usb_endpoint_type(desc);
  759. dep->flags |= DWC3_EP_ENABLED;
  760. reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
  761. reg |= DWC3_DALEPENA_EP(dep->number);
  762. dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
  763. dep->trb_dequeue = 0;
  764. dep->trb_enqueue = 0;
  765. if (usb_endpoint_xfer_control(desc))
  766. goto out;
  767. /* Initialize the TRB ring */
  768. memset(dep->trb_pool, 0,
  769. sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
  770. /* Link TRB. The HWO bit is never reset */
  771. trb_st_hw = &dep->trb_pool[0];
  772. trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
  773. trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
  774. trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
  775. trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
  776. trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
  777. }
  778. /*
  779. * Issue StartTransfer here with no-op TRB so we can always rely on No
  780. * Response Update Transfer command.
  781. */
  782. if (usb_endpoint_xfer_bulk(desc) ||
  783. usb_endpoint_xfer_int(desc)) {
  784. struct dwc3_gadget_ep_cmd_params params;
  785. struct dwc3_trb *trb;
  786. dma_addr_t trb_dma;
  787. u32 cmd;
  788. memset(&params, 0, sizeof(params));
  789. trb = &dep->trb_pool[0];
  790. trb_dma = dwc3_trb_dma_offset(dep, trb);
  791. params.param0 = upper_32_bits(trb_dma);
  792. params.param1 = lower_32_bits(trb_dma);
  793. cmd = DWC3_DEPCMD_STARTTRANSFER;
  794. ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
  795. if (ret < 0)
  796. return ret;
  797. if (dep->stream_capable) {
  798. /*
  799. * For streams, at start, there maybe a race where the
  800. * host primes the endpoint before the function driver
  801. * queues a request to initiate a stream. In that case,
  802. * the controller will not see the prime to generate the
  803. * ERDY and start stream. To workaround this, issue a
  804. * no-op TRB as normal, but end it immediately. As a
  805. * result, when the function driver queues the request,
  806. * the next START_TRANSFER command will cause the
  807. * controller to generate an ERDY to initiate the
  808. * stream.
  809. */
  810. dwc3_stop_active_transfer(dep, true, true);
  811. /*
  812. * All stream eps will reinitiate stream on NoStream
  813. * rejection until we can determine that the host can
  814. * prime after the first transfer.
  815. *
  816. * However, if the controller is capable of
  817. * TXF_FLUSH_BYPASS, then IN direction endpoints will
  818. * automatically restart the stream without the driver
  819. * initiation.
  820. */
  821. if (!dep->direction ||
  822. !(dwc->hwparams.hwparams9 &
  823. DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS))
  824. dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
  825. }
  826. }
  827. out:
  828. trace_dwc3_gadget_ep_enable(dep);
  829. return 0;
  830. }
  831. void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status)
  832. {
  833. struct dwc3_request *req;
  834. dwc3_stop_active_transfer(dep, true, false);
  835. /* If endxfer is delayed, avoid unmapping requests */
  836. if (dep->flags & DWC3_EP_DELAY_STOP)
  837. return;
  838. /* - giveback all requests to gadget driver */
  839. while (!list_empty(&dep->started_list)) {
  840. req = next_request(&dep->started_list);
  841. dwc3_gadget_giveback(dep, req, status);
  842. }
  843. while (!list_empty(&dep->pending_list)) {
  844. req = next_request(&dep->pending_list);
  845. dwc3_gadget_giveback(dep, req, status);
  846. }
  847. while (!list_empty(&dep->cancelled_list)) {
  848. req = next_request(&dep->cancelled_list);
  849. dwc3_gadget_giveback(dep, req, status);
  850. }
  851. }
  852. /**
  853. * __dwc3_gadget_ep_disable - disables a hw endpoint
  854. * @dep: the endpoint to disable
  855. *
  856. * This function undoes what __dwc3_gadget_ep_enable did and also removes
  857. * requests which are currently being processed by the hardware and those which
  858. * are not yet scheduled.
  859. *
  860. * Caller should take care of locking.
  861. */
  862. static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
  863. {
  864. struct dwc3 *dwc = dep->dwc;
  865. u32 reg;
  866. u32 mask;
  867. trace_dwc3_gadget_ep_disable(dep);
  868. /* make sure HW endpoint isn't stalled */
  869. if (dep->flags & DWC3_EP_STALL)
  870. __dwc3_gadget_ep_set_halt(dep, 0, false);
  871. reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
  872. reg &= ~DWC3_DALEPENA_EP(dep->number);
  873. dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
  874. dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
  875. dep->stream_capable = false;
  876. dep->type = 0;
  877. mask = DWC3_EP_TXFIFO_RESIZED;
  878. /*
  879. * dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
  880. * set. Do not clear DEP flags, so that the end transfer command will
  881. * be reattempted during the next SETUP stage.
  882. */
  883. if (dep->flags & DWC3_EP_DELAY_STOP)
  884. mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED);
  885. dep->flags &= mask;
  886. /* Clear out the ep descriptors for non-ep0 */
  887. if (dep->number > 1) {
  888. dep->endpoint.comp_desc = NULL;
  889. dep->endpoint.desc = NULL;
  890. }
  891. return 0;
  892. }
  893. /* -------------------------------------------------------------------------- */
/* Gadget drivers may not enable ep0; always reject. */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
/* Gadget drivers may not disable ep0; always reject. */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
  903. /* -------------------------------------------------------------------------- */
  904. static int dwc3_gadget_ep_enable(struct usb_ep *ep,
  905. const struct usb_endpoint_descriptor *desc)
  906. {
  907. struct dwc3_ep *dep;
  908. struct dwc3 *dwc;
  909. unsigned long flags;
  910. int ret;
  911. if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
  912. pr_debug("dwc3: invalid parameters\n");
  913. return -EINVAL;
  914. }
  915. if (!desc->wMaxPacketSize) {
  916. pr_debug("dwc3: missing wMaxPacketSize\n");
  917. return -EINVAL;
  918. }
  919. dep = to_dwc3_ep(ep);
  920. dwc = dep->dwc;
  921. if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
  922. "%s is already enabled\n",
  923. dep->name))
  924. return 0;
  925. spin_lock_irqsave(&dwc->lock, flags);
  926. ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
  927. spin_unlock_irqrestore(&dwc->lock, flags);
  928. return ret;
  929. }
  930. static int dwc3_gadget_ep_disable(struct usb_ep *ep)
  931. {
  932. struct dwc3_ep *dep;
  933. struct dwc3 *dwc;
  934. unsigned long flags;
  935. int ret;
  936. if (!ep) {
  937. pr_debug("dwc3: invalid parameters\n");
  938. return -EINVAL;
  939. }
  940. dep = to_dwc3_ep(ep);
  941. dwc = dep->dwc;
  942. if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
  943. "%s is already disabled\n",
  944. dep->name))
  945. return 0;
  946. spin_lock_irqsave(&dwc->lock, flags);
  947. ret = __dwc3_gadget_ep_disable(dep);
  948. spin_unlock_irqrestore(&dwc->lock, flags);
  949. return ret;
  950. }
  951. static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
  952. gfp_t gfp_flags)
  953. {
  954. struct dwc3_request *req;
  955. struct dwc3_ep *dep = to_dwc3_ep(ep);
  956. req = kzalloc(sizeof(*req), gfp_flags);
  957. if (!req)
  958. return NULL;
  959. req->direction = dep->direction;
  960. req->epnum = dep->number;
  961. req->dep = dep;
  962. req->status = DWC3_REQUEST_STATUS_UNKNOWN;
  963. trace_dwc3_alloc_request(req);
  964. return &req->request;
  965. }
  966. static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
  967. struct usb_request *request)
  968. {
  969. struct dwc3_request *req = to_dwc3_request(request);
  970. trace_dwc3_free_request(req);
  971. kfree(req);
  972. }
  973. /**
  974. * dwc3_ep_prev_trb - returns the previous TRB in the ring
  975. * @dep: The endpoint with the TRB ring
  976. * @index: The index of the current TRB in the ring
  977. *
  978. * Returns the TRB prior to the one pointed to by the index. If the
  979. * index is 0, we will wrap backwards, skip the link TRB, and return
  980. * the one just before that.
  981. */
  982. static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
  983. {
  984. u8 tmp = index;
  985. if (!tmp)
  986. tmp = DWC3_TRB_NUM - 1;
  987. return &dep->trb_pool[tmp - 1];
  988. }
  989. static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
  990. {
  991. u8 trbs_left;
  992. /*
  993. * If the enqueue & dequeue are equal then the TRB ring is either full
  994. * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
  995. * pending to be processed by the driver.
  996. */
  997. if (dep->trb_enqueue == dep->trb_dequeue) {
  998. /*
  999. * If there is any request remained in the started_list at
  1000. * this point, that means there is no TRB available.
  1001. */
  1002. if (!list_empty(&dep->started_list))
  1003. return 0;
  1004. return DWC3_TRB_NUM - 1;
  1005. }
  1006. trbs_left = dep->trb_dequeue - dep->trb_enqueue;
  1007. trbs_left &= (DWC3_TRB_NUM - 1);
  1008. if (dep->trb_dequeue < dep->trb_enqueue)
  1009. trbs_left--;
  1010. return trbs_left;
  1011. }
  1012. /**
  1013. * dwc3_prepare_one_trb - setup one TRB from one request
  1014. * @dep: endpoint for which this request is prepared
  1015. * @req: dwc3_request pointer
  1016. * @trb_length: buffer size of the TRB
  1017. * @chain: should this TRB be chained to the next?
  1018. * @node: only for isochronous endpoints. First TRB needs different type.
  1019. * @use_bounce_buffer: set to use bounce buffer
  1020. * @must_interrupt: set to interrupt on TRB completion
  1021. */
  1022. static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
  1023. struct dwc3_request *req, unsigned int trb_length,
  1024. unsigned int chain, unsigned int node, bool use_bounce_buffer,
  1025. bool must_interrupt)
  1026. {
  1027. struct dwc3_trb *trb;
  1028. dma_addr_t dma;
  1029. unsigned int stream_id = req->request.stream_id;
  1030. unsigned int short_not_ok = req->request.short_not_ok;
  1031. unsigned int no_interrupt = req->request.no_interrupt;
  1032. unsigned int is_last = req->request.is_last;
  1033. struct dwc3 *dwc = dep->dwc;
  1034. struct usb_gadget *gadget = dwc->gadget;
  1035. enum usb_device_speed speed = gadget->speed;
  1036. if (use_bounce_buffer)
  1037. dma = dep->dwc->bounce_addr;
  1038. else if (req->request.num_sgs > 0)
  1039. dma = sg_dma_address(req->start_sg);
  1040. else
  1041. dma = req->request.dma;
  1042. trb = &dep->trb_pool[dep->trb_enqueue];
  1043. if (!req->trb) {
  1044. dwc3_gadget_move_started_request(req);
  1045. req->trb = trb;
  1046. req->trb_dma = dwc3_trb_dma_offset(dep, trb);
  1047. }
  1048. req->num_trbs++;
  1049. trb->size = DWC3_TRB_SIZE_LENGTH(trb_length);
  1050. trb->bpl = lower_32_bits(dma);
  1051. trb->bph = upper_32_bits(dma);
  1052. switch (usb_endpoint_type(dep->endpoint.desc)) {
  1053. case USB_ENDPOINT_XFER_CONTROL:
  1054. trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
  1055. break;
  1056. case USB_ENDPOINT_XFER_ISOC:
  1057. if (!node) {
  1058. trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
  1059. /*
  1060. * USB Specification 2.0 Section 5.9.2 states that: "If
  1061. * there is only a single transaction in the microframe,
  1062. * only a DATA0 data packet PID is used. If there are
  1063. * two transactions per microframe, DATA1 is used for
  1064. * the first transaction data packet and DATA0 is used
  1065. * for the second transaction data packet. If there are
  1066. * three transactions per microframe, DATA2 is used for
  1067. * the first transaction data packet, DATA1 is used for
  1068. * the second, and DATA0 is used for the third."
  1069. *
  1070. * IOW, we should satisfy the following cases:
  1071. *
  1072. * 1) length <= maxpacket
  1073. * - DATA0
  1074. *
  1075. * 2) maxpacket < length <= (2 * maxpacket)
  1076. * - DATA1, DATA0
  1077. *
  1078. * 3) (2 * maxpacket) < length <= (3 * maxpacket)
  1079. * - DATA2, DATA1, DATA0
  1080. */
  1081. if (speed == USB_SPEED_HIGH) {
  1082. struct usb_ep *ep = &dep->endpoint;
  1083. unsigned int mult = 2;
  1084. unsigned int maxp = usb_endpoint_maxp(ep->desc);
  1085. if (req->request.length <= (2 * maxp))
  1086. mult--;
  1087. if (req->request.length <= maxp)
  1088. mult--;
  1089. trb->size |= DWC3_TRB_SIZE_PCM1(mult);
  1090. }
  1091. } else {
  1092. trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
  1093. }
  1094. if (!no_interrupt && !chain)
  1095. trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
  1096. break;
  1097. case USB_ENDPOINT_XFER_BULK:
  1098. case USB_ENDPOINT_XFER_INT:
  1099. trb->ctrl = DWC3_TRBCTL_NORMAL;
  1100. break;
  1101. default:
  1102. /*
  1103. * This is only possible with faulty memory because we
  1104. * checked it already :)
  1105. */
  1106. dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
  1107. usb_endpoint_type(dep->endpoint.desc));
  1108. }
  1109. /*
  1110. * Enable Continue on Short Packet
  1111. * when endpoint is not a stream capable
  1112. */
  1113. if (usb_endpoint_dir_out(dep->endpoint.desc)) {
  1114. if (!dep->stream_capable)
  1115. trb->ctrl |= DWC3_TRB_CTRL_CSP;
  1116. if (short_not_ok)
  1117. trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
  1118. }
  1119. /* All TRBs setup for MST must set CSP=1 when LST=0 */
  1120. if (dep->stream_capable && DWC3_MST_CAPABLE(&dwc->hwparams))
  1121. trb->ctrl |= DWC3_TRB_CTRL_CSP;
  1122. if ((!no_interrupt && !chain) || must_interrupt)
  1123. trb->ctrl |= DWC3_TRB_CTRL_IOC;
  1124. if (chain)
  1125. trb->ctrl |= DWC3_TRB_CTRL_CHN;
  1126. else if (dep->stream_capable && is_last &&
  1127. !DWC3_MST_CAPABLE(&dwc->hwparams))
  1128. trb->ctrl |= DWC3_TRB_CTRL_LST;
  1129. if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
  1130. trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
  1131. /*
  1132. * As per data book 4.2.3.2TRB Control Bit Rules section
  1133. *
  1134. * The controller autonomously checks the HWO field of a TRB to determine if the
  1135. * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
  1136. * is valid before setting the HWO field to '1'. In most systems, this means that
  1137. * software must update the fourth DWORD of a TRB last.
  1138. *
  1139. * However there is a possibility of CPU re-ordering here which can cause
  1140. * controller to observe the HWO bit set prematurely.
  1141. * Add a write memory barrier to prevent CPU re-ordering.
  1142. */
  1143. wmb();
  1144. trb->ctrl |= DWC3_TRB_CTRL_HWO;
  1145. dwc3_ep_inc_enq(dep);
  1146. trace_dwc3_prepare_trb(dep, trb);
  1147. }
  1148. static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
  1149. {
  1150. unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
  1151. unsigned int rem = req->request.length % maxp;
  1152. if ((req->request.length && req->request.zero && !rem &&
  1153. !usb_endpoint_xfer_isoc(dep->endpoint.desc)) ||
  1154. (!req->direction && rem))
  1155. return true;
  1156. return false;
  1157. }
  1158. /**
  1159. * dwc3_prepare_last_sg - prepare TRBs for the last SG entry
  1160. * @dep: The endpoint that the request belongs to
  1161. * @req: The request to prepare
  1162. * @entry_length: The last SG entry size
  1163. * @node: Indicates whether this is not the first entry (for isoc only)
  1164. *
  1165. * Return the number of TRBs prepared.
  1166. */
  1167. static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
  1168. struct dwc3_request *req, unsigned int entry_length,
  1169. unsigned int node)
  1170. {
  1171. unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
  1172. unsigned int rem = req->request.length % maxp;
  1173. unsigned int num_trbs = 1;
  1174. if (dwc3_needs_extra_trb(dep, req))
  1175. num_trbs++;
  1176. if (dwc3_calc_trbs_left(dep) < num_trbs)
  1177. return 0;
  1178. req->needs_extra_trb = num_trbs > 1;
  1179. /* Prepare a normal TRB */
  1180. if (req->direction || req->request.length)
  1181. dwc3_prepare_one_trb(dep, req, entry_length,
  1182. req->needs_extra_trb, node, false, false);
  1183. /* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
  1184. if ((!req->direction && !req->request.length) || req->needs_extra_trb)
  1185. dwc3_prepare_one_trb(dep, req,
  1186. req->direction ? 0 : maxp - rem,
  1187. false, 1, true, false);
  1188. return num_trbs;
  1189. }
/*
 * dwc3_prepare_trbs_sg - set up TRBs for a scatter-gather request
 * @dep: endpoint the request belongs to
 * @req: the (possibly partially prepared) request
 *
 * Walks the not-yet-queued SG entries starting at req->start_sg and
 * prepares one TRB per entry until either all data is queued or the TRB
 * ring runs out of room; preparation can be resumed later from where it
 * stopped. Returns the number of TRBs prepared by this call.
 */
static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->start_sg;
	struct scatterlist *s;
	int		i;
	unsigned int length = req->request.length;
	unsigned int remaining = req->request.num_mapped_sgs
		- req->num_queued_sgs;
	unsigned int num_trbs = req->num_trbs;
	bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);

	/*
	 * If we resume preparing the request, then get the remaining length of
	 * the request and resume where we left off.
	 */
	for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
		length -= sg_dma_len(s);

	for_each_sg(sg, s, remaining, i) {
		unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
		unsigned int trb_length;
		bool must_interrupt = false;
		bool last_sg = false;

		/* Cap the TRB at the data still owed to the request. */
		trb_length = min_t(unsigned int, length, sg_dma_len(s));

		length -= trb_length;

		/*
		 * IOMMU driver is coalescing the list of sgs which shares a
		 * page boundary into one and giving it to USB driver. With
		 * this the number of sgs mapped is not equal to the number of
		 * sgs passed. So mark the chain bit to false if it isthe last
		 * mapped sg.
		 */
		if ((i == remaining - 1) || !length)
			last_sg = true;

		if (!num_trbs_left)
			break;

		if (last_sg) {
			if (!dwc3_prepare_last_sg(dep, req, trb_length, i))
				break;
		} else {
			/*
			 * Look ahead to check if we have enough TRBs for the
			 * next SG entry. If not, set interrupt on this TRB to
			 * resume preparing the next SG entry when more TRBs are
			 * free.
			 */
			if (num_trbs_left == 1 || (needs_extra_trb &&
					num_trbs_left <= 2 &&
					sg_dma_len(sg_next(s)) >= length))
				must_interrupt = true;

			dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false,
					must_interrupt);
		}

		/*
		 * There can be a situation where all sgs in sglist are not
		 * queued because of insufficient trb number. To handle this
		 * case, update start_sg to next sg to be queued, so that
		 * we have free trbs we can continue queuing from where we
		 * previously stopped
		 */
		if (!last_sg)
			req->start_sg = sg_next(s);

		req->num_queued_sgs++;
		req->num_pending_sgs--;

		/*
		 * The number of pending SG entries may not correspond to the
		 * number of mapped SG entries. If all the data are queued, then
		 * don't include unused SG entries.
		 */
		if (length == 0) {
			req->num_pending_sgs = 0;
			break;
		}

		if (must_interrupt)
			break;
	}

	/* Report how many TRBs this call added. */
	return req->num_trbs - num_trbs;
}
  1267. static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep,
  1268. struct dwc3_request *req)
  1269. {
  1270. return dwc3_prepare_last_sg(dep, req, req->request.length, 0);
  1271. }
/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 *
 * Returns the number of TRBs prepared or negative errno.
 */
static int dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request *req, *n;
	int ret = 0;

	/* The TRB ring enqueue/dequeue math relies on a power-of-2 size. */
	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/*
	 * We can get in a situation where there's a request in the started list
	 * but there weren't enough TRBs to fully kick it in the first time
	 * around, so it has been waiting for more TRBs to be freed up.
	 *
	 * In that case, we should check if we have a request with pending_sgs
	 * in the started list and prepare TRBs for that request first,
	 * otherwise we will prepare TRBs completely out of order and that will
	 * break things.
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0) {
			ret = dwc3_prepare_trbs_sg(dep, req);
			/* Ring full or request still incomplete: stop here. */
			if (!ret || req->num_pending_sgs)
				return ret;
		}

		if (!dwc3_calc_trbs_left(dep))
			return ret;

		/*
		 * Don't prepare beyond a transfer. In DWC_usb32, its transfer
		 * burst capability may try to read and use TRBs beyond the
		 * active transfer instead of stopping.
		 */
		if (dep->stream_capable && req->request.is_last &&
				!DWC3_MST_CAPABLE(&dep->dwc->hwparams))
			return ret;
	}

	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		struct dwc3 *dwc = dep->dwc;

		/* DMA-map the request buffer before building TRBs for it. */
		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
				dep->direction);
		if (ret)
			return ret;

		req->sg = req->request.sg;
		req->start_sg = req->sg;
		req->num_queued_sgs = 0;
		req->num_pending_sgs = req->request.num_mapped_sgs;

		if (req->num_pending_sgs > 0) {
			ret = dwc3_prepare_trbs_sg(dep, req);
			if (req->num_pending_sgs)
				return ret;
		} else {
			ret = dwc3_prepare_trbs_linear(dep, req);
		}

		if (!ret || !dwc3_calc_trbs_left(dep))
			return ret;

		/*
		 * Don't prepare beyond a transfer. In DWC_usb32, its transfer
		 * burst capability may try to read and use TRBs beyond the
		 * active transfer instead of stopping.
		 */
		if (dep->stream_capable && req->request.is_last &&
				!DWC3_MST_CAPABLE(&dwc->hwparams))
			return ret;
	}

	return ret;
}
  1344. static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
/*
 * __dwc3_gadget_kick_transfer - prepare TRBs and start/update the transfer
 * @dep: endpoint to kick
 *
 * Prepares as many TRBs as possible, then issues either a Start Transfer
 * command (when no transfer is in flight) or an Update Transfer command for
 * the running transfer. Returns 0 on success or a negative errno.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	int starting;
	int ret;
	u32 cmd;

	/*
	 * Note that it's normal to have no new TRBs prepared (i.e. ret == 0).
	 * This happens when we need to stop and restart a transfer such as in
	 * the case of reinitiating a stream or retrying an isoc transfer.
	 */
	ret = dwc3_prepare_trbs(dep);
	if (ret < 0)
		return ret;

	starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);

	/*
	 * If there's no new TRB prepared and we don't need to restart a
	 * transfer, there's no need to update the transfer.
	 */
	if (!ret && !starting)
		return ret;

	req = next_request(&dep->started_list);
	if (!req) {
		/* Nothing queued yet; kick again when a request arrives. */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;

		if (dep->stream_capable)
			cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
			cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		struct dwc3_request *tmp;

		/* -EAGAIN is propagated so the caller may retry later. */
		if (ret == -EAGAIN)
			return ret;

		dwc3_stop_active_transfer(dep, true, true);

		/* Give all started requests back as cancelled. */
		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
			dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_DEQUEUED);

		/* If ep isn't started, then there's no end transfer pending */
		if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
			dwc3_gadget_ep_cleanup_cancelled_requests(dep);

		return ret;
	}

	if (dep->stream_capable && req->request.is_last &&
			!DWC3_MST_CAPABLE(&dep->dwc->hwparams))
		dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;

	return 0;
}
  1403. static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
  1404. {
  1405. u32 reg;
  1406. reg = dwc3_readl(dwc->regs, DWC3_DSTS);
  1407. return DWC3_DSTS_SOFFN(reg);
  1408. }
/**
 * __dwc3_stop_active_transfer - stop the current active transfer
 * @dep: isoc endpoint
 * @force: set forcerm bit in the command
 * @interrupt: command complete interrupt after End Transfer command
 *
 * When setting force, the ForceRM bit will be set. In that case
 * the controller won't update the TRB progress on command
 * completion. It also won't clear the HWO bit in the TRB.
 * The command will also not complete immediately in that case.
 *
 * Returns the End Transfer command status (0 on success, negative errno
 * otherwise; 0 is also returned when the stop is deferred via
 * DWC3_EP_DELAY_STOP).
 */
static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	/*
	 * If the End Transfer command was timed out while the device is
	 * not in SETUP phase, it's possible that an incoming Setup packet
	 * may prevent the command's completion. Let's retry when the
	 * ep0state returns to EP0_SETUP_PHASE.
	 */
	if (ret == -ETIMEDOUT && dep->dwc->ep0state != EP0_SETUP_PHASE) {
		dep->flags |= DWC3_EP_DELAY_STOP;
		return 0;
	}
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;

	if (!interrupt) {
		/*
		 * NOTE(review): delay appears to give older DWC3 cores time
		 * to settle after End Transfer -- confirm against databook.
		 */
		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
			mdelay(1);
		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	} else if (!ret) {
		/* Command completion interrupt will finish the teardown. */
		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
	}

	dep->flags &= ~DWC3_EP_DELAY_STOP;
	return ret;
}
/**
 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
 * @dep: isoc endpoint
 *
 * This function tests for the correct combination of BIT[15:14] from the 16-bit
 * microframe number reported by the XferNotReady event for the future frame
 * number to start the isoc transfer.
 *
 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
 * XferNotReady event are invalid. The driver uses this number to schedule the
 * isochronous transfer and passes it to the START TRANSFER command. Because
 * this number is invalid, the command may fail. If BIT[15:14] matches the
 * internal 16-bit microframe, the START TRANSFER command will pass and the
 * transfer will start at the scheduled time, if it is off by 1, the command
 * will still pass, but the transfer will start 2 seconds in the future. For all
 * other conditions, the START TRANSFER command will fail with bus-expiry.
 *
 * In order to workaround this issue, we can test for the correct combination of
 * BIT[15:14] by sending START TRANSFER commands with different values of
 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart
 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
 * As the result, within the 4 possible combinations for BIT[15:14], there will
 * be 2 successful and 2 failure START COMMAND status. One of the 2 successful
 * command status will result in a 2-second delay start. The smaller BIT[15:14]
 * value is the correct combination.
 *
 * Since there are only 4 outcomes and the results are ordered, we can simply
 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
 * deduce the smaller successful combination.
 *
 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
 * of BIT[15:14]. The correct combination is as follow:
 *
 * if test0 fails and test1 passes, BIT[15:14] is 'b01
 * if test0 fails and test1 fails, BIT[15:14] is 'b10
 * if test0 passes and test1 fails, BIT[15:14] is 'b11
 * if test0 passes and test1 passes, BIT[15:14] is 'b00
 *
 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
 * endpoints.
 */
static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
{
	int cmd_status = 0;
	bool test0;
	bool test1;

	/* Run at most two probe commands ('b00 then 'b01). */
	while (dep->combo_num < 2) {
		struct dwc3_gadget_ep_cmd_params params;
		u32 test_frame_number;
		u32 cmd;

		/*
		 * Check if we can start isoc transfer on the next interval or
		 * 4 uframes in the future with BIT[15:14] as dep->combo_num
		 */
		test_frame_number = dep->frame_number & DWC3_FRNUMBER_MASK;
		test_frame_number |= dep->combo_num << 14;
		test_frame_number += max_t(u32, 4, dep->interval);

		/* Probe transfers target the scratch bounce buffer. */
		params.param0 = upper_32_bits(dep->dwc->bounce_addr);
		params.param1 = lower_32_bits(dep->dwc->bounce_addr);

		cmd = DWC3_DEPCMD_STARTTRANSFER;
		cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
		cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);

		/* Redo if some other failure beside bus-expiry is received */
		if (cmd_status && cmd_status != -EAGAIN) {
			dep->start_cmd_status = 0;
			dep->combo_num = 0;
			return 0;
		}

		/* Store the first test status */
		if (dep->combo_num == 0)
			dep->start_cmd_status = cmd_status;

		dep->combo_num++;

		/*
		 * End the transfer if the START_TRANSFER command is successful
		 * to wait for the next XferNotReady to test the command again
		 */
		if (cmd_status == 0) {
			dwc3_stop_active_transfer(dep, true, true);
			return 0;
		}
	}

	/* test0 and test1 are both completed at this point */
	test0 = (dep->start_cmd_status == 0);
	test1 = (cmd_status == 0);

	/* Decode the result table from the kernel-doc above. */
	if (!test0 && test1)
		dep->combo_num = 1;
	else if (!test0 && !test1)
		dep->combo_num = 2;
	else if (test0 && !test1)
		dep->combo_num = 3;
	else if (test0 && test1)
		dep->combo_num = 0;

	dep->frame_number &= DWC3_FRNUMBER_MASK;
	dep->frame_number |= dep->combo_num << 14;
	dep->frame_number += max_t(u32, 4, dep->interval);

	/* Reinitialize test variables */
	dep->start_cmd_status = 0;
	dep->combo_num = 0;

	return __dwc3_gadget_kick_transfer(dep);
}
/*
 * __dwc3_gadget_start_isoc - start an isochronous transfer at a valid frame
 * @dep: isoc endpoint
 *
 * Computes a future (micro)frame number and retries the Start Transfer
 * command up to DWC3_ISOC_MAX_RETRIES times on bus-expiry (-EAGAIN).
 * Returns 0 on success or a negative errno.
 */
static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	int i;

	if (list_empty(&dep->pending_list) &&
	    list_empty(&dep->started_list)) {
		/* Nothing to start yet; wait for the next queued request. */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return -EAGAIN;
	}

	if (!dwc->dis_start_transfer_quirk &&
	    (DWC3_VER_IS_PRIOR(DWC31, 170A) ||
	     DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
		if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction)
			return dwc3_gadget_start_isoc_quirk(dep);
	}

	if (desc->bInterval <= 14 &&
	    dwc->gadget->speed >= USB_SPEED_HIGH) {
		u32 frame = __dwc3_gadget_get_frame(dwc);
		bool rollover = frame <
				(dep->frame_number & DWC3_FRNUMBER_MASK);

		/*
		 * frame_number is set from XferNotReady and may be already
		 * out of date. DSTS only provides the lower 14 bit of the
		 * current frame number. So add the upper two bits of
		 * frame_number and handle a possible rollover.
		 * This will provide the correct frame_number unless more than
		 * rollover has happened since XferNotReady.
		 */
		dep->frame_number = (dep->frame_number & ~DWC3_FRNUMBER_MASK) |
				    frame;
		if (rollover)
			dep->frame_number += BIT(14);
	}

	for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
		int future_interval = i + 1;

		/* Give the controller at least 500us to schedule transfers */
		if (desc->bInterval < 3)
			future_interval += 3 - desc->bInterval;

		dep->frame_number = DWC3_ALIGN_FRAME(dep, future_interval);

		ret = __dwc3_gadget_kick_transfer(dep);
		if (ret != -EAGAIN)
			break;
	}

	/*
	 * After a number of unsuccessful start attempts due to bus-expiry
	 * status, issue END_TRANSFER command and retry on the next XferNotReady
	 * event.
	 */
	if (ret == -EAGAIN)
		ret = __dwc3_stop_active_transfer(dep, false, true);

	return ret;
}
/*
 * __dwc3_gadget_ep_queue - queue a request on an endpoint
 * @dep: destination endpoint
 * @req: request to queue
 *
 * Caller holds dwc->lock (see dwc3_gadget_ep_queue()). Validates the
 * request, adds it to the pending list, and kicks the transfer unless
 * endpoint state requires the start to be deferred. Returns 0 or a
 * negative errno.
 */
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
		dev_dbg(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
				&req->request, req->dep->name))
		return -EINVAL;

	if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
				"%s: request %pK already in flight\n",
				dep->name, &req->request))
		return -EINVAL;

	pm_runtime_get(dwc->dev);

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;

	trace_dwc3_ep_queue(req);

	list_add_tail(&req->list, &dep->pending_list);
	req->status = DWC3_REQUEST_STATUS_QUEUED;

	/* Current transfer must finish before starting another one. */
	if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
		return 0;

	/*
	 * Start the transfer only after the END_TRANSFER is completed
	 * and endpoint STALL is cleared.
	 */
	if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
	    (dep->flags & DWC3_EP_WEDGE) ||
	    (dep->flags & DWC3_EP_DELAY_STOP) ||
	    (dep->flags & DWC3_EP_STALL)) {
		dep->flags |= DWC3_EP_DELAY_START;
		return 0;
	}

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for a XferNotReady event so we will know what's the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us issue EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
			if ((dep->flags & DWC3_EP_PENDING_REQUEST))
				return __dwc3_gadget_start_isoc(dep);

			return 0;
		}
	}

	__dwc3_gadget_kick_transfer(dep);

	return 0;
}
  1661. static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
  1662. gfp_t gfp_flags)
  1663. {
  1664. struct dwc3_request *req = to_dwc3_request(request);
  1665. struct dwc3_ep *dep = to_dwc3_ep(ep);
  1666. struct dwc3 *dwc = dep->dwc;
  1667. unsigned long flags;
  1668. int ret;
  1669. spin_lock_irqsave(&dwc->lock, flags);
  1670. ret = __dwc3_gadget_ep_queue(dep, req);
  1671. spin_unlock_irqrestore(&dwc->lock, flags);
  1672. return ret;
  1673. }
  1674. static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
  1675. {
  1676. int i;
  1677. /* If req->trb is not set, then the request has not started */
  1678. if (!req->trb)
  1679. return;
  1680. /*
  1681. * If request was already started, this means we had to
  1682. * stop the transfer. With that we also need to ignore
  1683. * all TRBs used by the request, however TRBs can only
  1684. * be modified after completion of END_TRANSFER
  1685. * command. So what we do here is that we wait for
  1686. * END_TRANSFER completion and only after that, we jump
  1687. * over TRBs by clearing HWO and incrementing dequeue
  1688. * pointer.
  1689. */
  1690. for (i = 0; i < req->num_trbs; i++) {
  1691. struct dwc3_trb *trb;
  1692. trb = &dep->trb_pool[dep->trb_dequeue];
  1693. trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
  1694. dwc3_ep_inc_deq(dep);
  1695. }
  1696. req->num_trbs = 0;
  1697. }
/*
 * dwc3_gadget_ep_cleanup_cancelled_requests - give back all cancelled requests
 * @dep: endpoint whose cancelled_list is drained
 *
 * Skips each request's TRBs and completes it with a status code matching
 * the cancellation reason. Stops early if a giveback disables the endpoint.
 */
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;

	while (!list_empty(&dep->cancelled_list)) {
		req = next_request(&dep->cancelled_list);
		dwc3_gadget_ep_skip_trbs(dep, req);
		/* Map the cancellation reason to the usb_request status. */
		switch (req->status) {
		case DWC3_REQUEST_STATUS_DISCONNECTED:
			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
			break;
		case DWC3_REQUEST_STATUS_DEQUEUED:
			dwc3_gadget_giveback(dep, req, -ECONNRESET);
			break;
		case DWC3_REQUEST_STATUS_STALLED:
			dwc3_gadget_giveback(dep, req, -EPIPE);
			break;
		default:
			dev_err(dwc->dev, "request cancelled with wrong reason:%d\n", req->status);
			dwc3_gadget_giveback(dep, req, -ECONNRESET);
			break;
		}
		/*
		 * The endpoint is disabled, let the dwc3_remove_requests()
		 * handle the cleanup.
		 */
		if (!dep->endpoint.desc)
			break;
	}
}
/*
 * dwc3_gadget_ep_dequeue - usb_ep_ops.dequeue: cancel a queued request
 * @ep: endpoint the request was queued on
 * @request: request to cancel
 *
 * Searches the cancelled, pending, and started lists in that order.
 * A request found on the started list requires the active transfer to be
 * stopped first. Returns 0 on success, -EINVAL when the request is not
 * queued on @ep.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Already cancelled: nothing more to do. */
	list_for_each_entry(r, &dep->cancelled_list, list) {
		if (r == req)
			goto out;
	}

	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req) {
			/*
			 * Explicitly check for EP0/1 as dequeue for those
			 * EPs need to be handled differently. Control EP
			 * only deals with one USB req, and giveback will
			 * occur during dwc3_ep0_stall_and_restart(). EP0
			 * requests are never added to started_list.
			 */
			if (dep->number > 1)
				dwc3_gadget_giveback(dep, req, -ECONNRESET);
			else
				dwc3_ep0_reset_state(dwc);
			goto out;
		}
	}

	list_for_each_entry(r, &dep->started_list, list) {
		if (r == req) {
			struct dwc3_request *t;

			/* wait until it is processed */
			dwc3_stop_active_transfer(dep, true, true);

			/*
			 * Remove any started request if the transfer is
			 * cancelled.
			 */
			list_for_each_entry_safe(r, t, &dep->started_list, list)
				dwc3_gadget_move_cancelled_request(r,
						DWC3_REQUEST_STATUS_DEQUEUED);

			dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;

			goto out;
		}
	}

	dev_err(dwc->dev, "request %pK was not queued to %s\n",
		request, ep->name);
	ret = -EINVAL;
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
/*
 * __dwc3_gadget_ep_set_halt - set or clear endpoint STALL
 * @dep: endpoint to (un)halt
 * @value: non-zero to stall, zero to clear the stall
 * @protocol: non-zero when triggered by a protocol (SETUP) request rather
 *            than a function driver
 *
 * Not valid for isochronous endpoints. Returns 0 on success, -EINVAL for
 * isoc endpoints, -EAGAIN when a non-protocol stall races with an active
 * transfer, or the endpoint command status.
 */
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_request *req;
	struct dwc3_request *tmp;
	int ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		struct dwc3_trb *trb;

		unsigned int transfer_in_flight;
		unsigned int started;

		/* ep0 TRBs live in a dedicated pool; others in trb_pool. */
		if (dep->number > 1)
			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		else
			trb = &dwc->ep0_trb[dep->trb_enqueue];

		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
		started = !list_empty(&dep->started_list);

		/* Function-initiated halt must not interrupt active traffic. */
		if (!protocol && ((dep->direction && transfer_in_flight) ||
				(!dep->direction && started))) {
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
				&params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		/*
		 * Don't issue CLEAR_STALL command to control endpoints. The
		 * controller automatically clears the STALL when it receives
		 * the SETUP token.
		 */
		if (dep->number <= 1) {
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
			return 0;
		}

		dwc3_stop_active_transfer(dep, true, true);

		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
			dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_STALLED);

		if (dep->flags & DWC3_EP_END_TRANSFER_PENDING ||
		    (dep->flags & DWC3_EP_DELAY_STOP)) {
			/* Defer the actual Clear Stall until teardown is done. */
			dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
			if (protocol)
				dwc->clear_stall_protocol = dep->number;
			return 0;
		}

		dwc3_gadget_ep_cleanup_cancelled_requests(dep);

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		if (ret) {
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
			return ret;
		}

		dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);

		/* Restart any transfer that was held back by the stall. */
		if ((dep->flags & DWC3_EP_DELAY_START) &&
		    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
			__dwc3_gadget_kick_transfer(dep);

		dep->flags &= ~DWC3_EP_DELAY_START;
	}

	return ret;
}
  1850. static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
  1851. {
  1852. struct dwc3_ep *dep = to_dwc3_ep(ep);
  1853. struct dwc3 *dwc = dep->dwc;
  1854. unsigned long flags;
  1855. int ret;
  1856. spin_lock_irqsave(&dwc->lock, flags);
  1857. ret = __dwc3_gadget_ep_set_halt(dep, value, false);
  1858. spin_unlock_irqrestore(&dwc->lock, flags);
  1859. return ret;
  1860. }
  1861. static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
  1862. {
  1863. struct dwc3_ep *dep = to_dwc3_ep(ep);
  1864. struct dwc3 *dwc = dep->dwc;
  1865. unsigned long flags;
  1866. int ret;
  1867. spin_lock_irqsave(&dwc->lock, flags);
  1868. dep->flags |= DWC3_EP_WEDGE;
  1869. if (dep->number == 0 || dep->number == 1)
  1870. ret = __dwc3_gadget_ep0_set_halt(ep, 1);
  1871. else
  1872. ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
  1873. spin_unlock_irqrestore(&dwc->lock, flags);
  1874. return ret;
  1875. }
  1876. /* -------------------------------------------------------------------------- */
/* Endpoint descriptor template for the default control endpoint (ep0). */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};
/* usb_ep_ops for ep0: queue/enable/halt go through the ep0-specific paths. */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
/* usb_ep_ops for all non-control endpoints. */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
  1902. /* -------------------------------------------------------------------------- */
  1903. static int dwc3_gadget_get_frame(struct usb_gadget *g)
  1904. {
  1905. struct dwc3 *dwc = gadget_to_dwc(g);
  1906. return __dwc3_gadget_get_frame(dwc);
  1907. }
/*
 * __dwc3_gadget_wakeup - issue a remote wakeup request
 * @dwc: controller context; caller holds dwc->lock
 *
 * Validates the link state, drives the link into Recovery, and polls until
 * it reaches U0. Returns 0 on success, -EINVAL when the link state does not
 * allow remote wakeup or U0 is never reached, or a negative errno from the
 * link-state change.
 */
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
	int retries;

	int ret;
	u32 reg;

	u8 link_state;

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RESET:
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
	case DWC3_LINK_STATE_U2:	/* in HS, means Sleep (L1) */
	case DWC3_LINK_STATE_U1:
	case DWC3_LINK_STATE_RESUME:
		break;
	default:
		return -EINVAL;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		return ret;
	}

	/* Recent versions do this automatically */
	if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	retries = 20000;

	/* NOTE(review): tight poll with no delay between register reads. */
	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}
  1959. static int dwc3_gadget_wakeup(struct usb_gadget *g)
  1960. {
  1961. struct dwc3 *dwc = gadget_to_dwc(g);
  1962. unsigned long flags;
  1963. int ret;
  1964. spin_lock_irqsave(&dwc->lock, flags);
  1965. ret = __dwc3_gadget_wakeup(dwc);
  1966. spin_unlock_irqrestore(&dwc->lock, flags);
  1967. return ret;
  1968. }
  1969. static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
  1970. int is_selfpowered)
  1971. {
  1972. struct dwc3 *dwc = gadget_to_dwc(g);
  1973. unsigned long flags;
  1974. spin_lock_irqsave(&dwc->lock, flags);
  1975. g->is_selfpowered = !!is_selfpowered;
  1976. spin_unlock_irqrestore(&dwc->lock, flags);
  1977. return 0;
  1978. }
  1979. static void dwc3_stop_active_transfers(struct dwc3 *dwc)
  1980. {
  1981. u32 epnum;
  1982. for (epnum = 2; epnum < dwc->num_eps; epnum++) {
  1983. struct dwc3_ep *dep;
  1984. dep = dwc->eps[epnum];
  1985. if (!dep)
  1986. continue;
  1987. dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
  1988. }
  1989. }
/*
 * __dwc3_gadget_set_ssp_rate - program DCFG for a SuperSpeed Plus rate
 * @dwc: controller context
 *
 * Selects between Gen1/Gen2 signaling and one or two lanes based on the
 * requested gadget_ssp_rate, falling back to the controller's maximum
 * supported rate when the request is USB_SSP_GEN_UNKNOWN.
 */
static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
{
	enum usb_ssp_rate ssp_rate = dwc->gadget_ssp_rate;
	u32 reg;

	if (ssp_rate == USB_SSP_GEN_UNKNOWN)
		ssp_rate = dwc->max_ssp_rate;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	/* Clear speed and lane-count fields before re-encoding them. */
	reg &= ~DWC3_DCFG_SPEED_MASK;
	reg &= ~DWC3_DCFG_NUMLANES(~0);

	if (ssp_rate == USB_SSP_GEN_1x2)
		reg |= DWC3_DCFG_SUPERSPEED;
	else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
		reg |= DWC3_DCFG_SUPERSPEED_PLUS;

	if (ssp_rate != USB_SSP_GEN_2x1 &&
	    dwc->max_ssp_rate != USB_SSP_GEN_2x1)
		reg |= DWC3_DCFG_NUMLANES(1);

	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
/*
 * __dwc3_gadget_set_speed - program the device speed into DCFG
 * @dwc: controller context
 *
 * Clamps the requested gadget speed to the controller maximum, delegates
 * SuperSpeed Plus handling on DWC_usb32 to __dwc3_gadget_set_ssp_rate(),
 * and applies the metastability quirk for old cores.
 */
static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
{
	enum usb_device_speed speed;
	u32 reg;

	speed = dwc->gadget_max_speed;
	if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
		speed = dwc->maximum_speed;

	if (speed == USB_SPEED_SUPER_PLUS &&
	    DWC3_IP_IS(DWC32)) {
		__dwc3_gadget_set_ssp_rate(dwc);
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/*
	 * WORKAROUND: DWC3 revision < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than the SuperSpeed
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
	    !dwc->dis_metastability_quirk) {
		reg |= DWC3_DCFG_SUPERSPEED;
	} else {
		switch (speed) {
		case USB_SPEED_FULL:
			reg |= DWC3_DCFG_FULLSPEED;
			break;
		case USB_SPEED_HIGH:
			reg |= DWC3_DCFG_HIGHSPEED;
			break;
		case USB_SPEED_SUPER:
			reg |= DWC3_DCFG_SUPERSPEED;
			break;
		case USB_SPEED_SUPER_PLUS:
			/* DWC_usb3 tops out at SuperSpeed. */
			if (DWC3_IP_IS(DWC3))
				reg |= DWC3_DCFG_SUPERSPEED;
			else
				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
			break;
		default:
			dev_err(dwc->dev, "invalid speed (%d)\n", speed);

			if (DWC3_IP_IS(DWC3))
				reg |= DWC3_DCFG_SUPERSPEED;
			else
				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
		}
	}

	/* Sub-SSP speeds on DWC_usb32 never use multiple lanes. */
	if (DWC3_IP_IS(DWC32) &&
	    speed > USB_SPEED_UNKNOWN &&
	    speed < USB_SPEED_SUPER_PLUS)
		reg &= ~DWC3_DCFG_NUMLANES(~0);

	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
/*
 * dwc3_gadget_run_stop - set or clear DCTL.RunStop and wait for the
 * controller to report the matching halt state in DSTS.DEVCTRLHLT.
 *
 * @is_on: nonzero to start the controller, zero to halt it.
 *
 * Returns 0 on success or if the device is runtime-suspended (already
 * halted), -ETIMEDOUT if the controller never reaches the requested state.
 * Sleeps between polls, so must be called from process context.
 */
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32 reg;
	u32 timeout = 2000;

	/* Controller was already halted/stopped during runtime suspend */
	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		/* Speed must be programmed into DCFG before Run/Stop is set */
		__dwc3_gadget_set_speed(dwc);
		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		dwc->pullups_connected = false;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	/*
	 * Poll DSTS.DEVCTRLHLT: the halt bit must be clear when starting
	 * and set when stopping. The XOR exits the loop once the bit
	 * matches the requested direction.
	 */
	do {
		usleep_range(1000, 2000);
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}
  2100. static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
  2101. static void __dwc3_gadget_stop(struct dwc3 *dwc);
  2102. static int __dwc3_gadget_start(struct dwc3 *dwc);
  2103. static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
  2104. {
  2105. unsigned long flags;
  2106. int ret;
  2107. spin_lock_irqsave(&dwc->lock, flags);
  2108. dwc->connected = false;
  2109. dev_info(dwc->dev, "%s dwc->connected: %d\n", __func__, dwc->connected);
  2110. /*
  2111. * Attempt to end pending SETUP status phase, and not wait for the
  2112. * function to do so.
  2113. */
  2114. if (dwc->delayed_status)
  2115. dwc3_ep0_send_delayed_status(dwc);
  2116. /*
  2117. * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
  2118. * Section 4.1.8 Table 4-7, it states that for a device-initiated
  2119. * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
  2120. * command for any active transfers" before clearing the RunStop
  2121. * bit.
  2122. */
  2123. dwc3_stop_active_transfers(dwc);
  2124. spin_unlock_irqrestore(&dwc->lock, flags);
  2125. /*
  2126. * Per databook, when we want to stop the gadget, if a control transfer
  2127. * is still in process, complete it and get the core into setup phase.
  2128. * In case the host is unresponsive to a SETUP transaction, forcefully
  2129. * stall the transfer, and move back to the SETUP phase, so that any
  2130. * pending endxfers can be executed.
  2131. */
  2132. if (dwc->ep0state != EP0_SETUP_PHASE) {
  2133. reinit_completion(&dwc->ep0_in_setup);
  2134. ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
  2135. msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
  2136. if (ret == 0) {
  2137. dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
  2138. spin_lock_irqsave(&dwc->lock, flags);
  2139. dwc3_ep0_reset_state(dwc);
  2140. spin_unlock_irqrestore(&dwc->lock, flags);
  2141. }
  2142. }
  2143. /*
  2144. * Note: if the GEVNTCOUNT indicates events in the event buffer, the
  2145. * driver needs to acknowledge them before the controller can halt.
  2146. * Simply let the interrupt handler acknowledges and handle the
  2147. * remaining event generated by the controller while polling for
  2148. * DSTS.DEVCTLHLT.
  2149. */
  2150. ret = dwc3_gadget_run_stop(dwc, false);
  2151. /*
  2152. * Stop the gadget after controller is halted, so that if needed, the
  2153. * events to update EP0 state can still occur while the run/stop
  2154. * routine polls for the halted state. DEVTEN is cleared as part of
  2155. * gadget stop.
  2156. */
  2157. spin_lock_irqsave(&dwc->lock, flags);
  2158. __dwc3_gadget_stop(dwc);
  2159. spin_unlock_irqrestore(&dwc->lock, flags);
  2160. return ret;
  2161. }
  2162. static int dwc3_gadget_soft_connect(struct dwc3 *dwc)
  2163. {
  2164. /*
  2165. * In the Synopsys DWC_usb31 1.90a programming guide section
  2166. * 4.1.9, it specifies that for a reconnect after a
  2167. * device-initiated disconnect requires a core soft reset
  2168. * (DCTL.CSftRst) before enabling the run/stop bit.
  2169. */
  2170. dwc3_core_soft_reset(dwc);
  2171. dwc3_event_buffers_setup(dwc);
  2172. __dwc3_gadget_start(dwc);
  2173. return dwc3_gadget_run_stop(dwc, true);
  2174. }
/*
 * dwc3_gadget_pullup - usb_gadget_ops.pullup: connect or disconnect the
 * device on the bus by setting/clearing the controller's Run/Stop bit.
 *
 * @is_on: nonzero to connect, zero to disconnect.
 *
 * Returns 0 on success or a negative error code from runtime resume or
 * the run/stop handshake. May sleep.
 */
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	int ret;

	is_on = !!is_on;

	dwc->softconnect = is_on;

	/*
	 * Avoid issuing a runtime resume if the device is already in the
	 * suspended state during gadget disconnect. DWC3 gadget was already
	 * halted/stopped during runtime suspend.
	 */
	if (!is_on) {
		pm_runtime_barrier(dwc->dev);
		if (pm_runtime_suspended(dwc->dev))
			return 0;
	}

	/*
	 * Check the return value for successful resume, or error. For a
	 * successful resume, the DWC3 runtime PM resume routine will handle
	 * the run stop sequence, so avoid duplicate operations here.
	 */
	ret = pm_runtime_get_sync(dwc->dev);
	if (!ret || ret < 0) {
		pm_runtime_put(dwc->dev);
		if (ret < 0)
			pm_runtime_set_suspended(dwc->dev);
		return ret;
	}

	/* Nothing to do if the pullup is already in the requested state */
	if (dwc->pullups_connected == is_on) {
		pm_runtime_put(dwc->dev);
		return 0;
	}

	/* Let any in-flight gadget IRQ handler finish before changing state */
	synchronize_irq(dwc->irq_gadget);

	if (!is_on) {
		ret = dwc3_gadget_soft_disconnect(dwc);
	} else {
		/*
		 * In the Synopsys DWC_usb31 1.90a programming guide section
		 * 4.1.9, it specifies that for a reconnect after a
		 * device-initiated disconnect requires a core soft reset
		 * (DCTL.CSftRst) before enabling the run/stop bit.
		 */
		ret = dwc3_core_soft_reset(dwc);
		if (ret)
			goto done;

		dwc3_event_buffers_setup(dwc);
		__dwc3_gadget_start(dwc);
		ret = dwc3_gadget_run_stop(dwc, true);
	}

done:
	pm_runtime_put(dwc->dev);

	return ret;
}
  2228. static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
  2229. {
  2230. u32 reg;
  2231. /* Enable all but Start and End of Frame IRQs */
  2232. reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
  2233. DWC3_DEVTEN_CMDCMPLTEN |
  2234. DWC3_DEVTEN_ERRTICERREN |
  2235. DWC3_DEVTEN_WKUPEVTEN |
  2236. DWC3_DEVTEN_CONNECTDONEEN |
  2237. DWC3_DEVTEN_USBRSTEN |
  2238. DWC3_DEVTEN_DISCONNEVTEN);
  2239. if (DWC3_VER_IS_PRIOR(DWC3, 250A))
  2240. reg |= DWC3_DEVTEN_ULSTCNGEN;
  2241. /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
  2242. if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
  2243. reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
  2244. dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
  2245. }
  2246. static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
  2247. {
  2248. /* mask all interrupts */
  2249. dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
  2250. }
  2251. static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
  2252. static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
  2253. /**
  2254. * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
  2255. * @dwc: pointer to our context structure
  2256. *
  2257. * The following looks like complex but it's actually very simple. In order to
  2258. * calculate the number of packets we can burst at once on OUT transfers, we're
  2259. * gonna use RxFIFO size.
  2260. *
  2261. * To calculate RxFIFO size we need two numbers:
  2262. * MDWIDTH = size, in bits, of the internal memory bus
  2263. * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
  2264. *
  2265. * Given these two numbers, the formula is simple:
  2266. *
  2267. * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
  2268. *
  2269. * 24 bytes is for 3x SETUP packets
  2270. * 16 bytes is a clock domain crossing tolerance
  2271. *
  2272. * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
  2273. */
  2274. static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
  2275. {
  2276. u32 ram2_depth;
  2277. u32 mdwidth;
  2278. u32 nump;
  2279. u32 reg;
  2280. ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
  2281. mdwidth = dwc3_mdwidth(dwc);
  2282. nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
  2283. nump = min_t(u32, nump, 16);
  2284. /* update NumP */
  2285. reg = dwc3_readl(dwc->regs, DWC3_DCFG);
  2286. reg &= ~DWC3_DCFG_NUMP_MASK;
  2287. reg |= nump << DWC3_DCFG_NUMP_SHIFT;
  2288. dwc3_writel(dwc->regs, DWC3_DCFG, reg);
  2289. }
/*
 * __dwc3_gadget_start - program controller defaults and bring up ep0.
 *
 * Configures interrupt moderation (IMOD), NUMP-based burst control,
 * stream handling and (when capable) MST, then enables both physical
 * halves of endpoint 0 and unmasks device interrupts so SETUP packets
 * can be received.
 *
 * Returns 0 on success; on ep0 enable failure, returns the error with
 * anything enabled so far torn down again.
 */
static int __dwc3_gadget_start(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret = 0;
	u32 reg;

	/*
	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
	 * the core supports IMOD, disable it.
	 */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
	} else if (dwc3_has_imod(dwc)) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
	}

	/*
	 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
	 * field instead of letting dwc3 itself calculate that automatically.
	 *
	 * This way, we maximize the chances that we'll be able to get several
	 * bursts of data without going through any sort of endpoint throttling.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
	if (DWC3_IP_IS(DWC3))
		reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
	else
		reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;

	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);

	dwc3_gadget_setup_nump(dwc);

	/*
	 * Currently the controller handles single stream only. So, Ignore
	 * Packet Pending bit for stream selection and don't search for another
	 * stream if the host sends Data Packet with PP=0 (for OUT direction) or
	 * ACK with NumP=0 and PP=0 (for IN direction). This slightly improves
	 * the stream performance.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg |= DWC3_DCFG_IGNSTRMPP;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	/* Enable MST by default if the device is capable of MST */
	if (DWC3_MST_CAPABLE(&dwc->hwparams)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG1);
		reg &= ~DWC3_DCFG1_DIS_MST_ENH;
		dwc3_writel(dwc->regs, DWC3_DCFG1, reg);
	}

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	/* Physical ep 0: the OUT half of USB ep0 */
	dep = dwc->eps[0];
	dep->flags = 0;
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	/* Physical ep 1: the IN half of USB ep0 */
	dep = dwc->eps[1];
	dep->flags = 0;
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc->ep0_bounced = false;
	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
	dwc->delayed_status = false;
	dwc3_ep0_out_start(dwc);

	dwc3_gadget_enable_irq(dwc);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}
  2364. static int dwc3_gadget_start(struct usb_gadget *g,
  2365. struct usb_gadget_driver *driver)
  2366. {
  2367. struct dwc3 *dwc = gadget_to_dwc(g);
  2368. unsigned long flags;
  2369. int ret;
  2370. int irq;
  2371. irq = dwc->irq_gadget;
  2372. ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
  2373. IRQF_SHARED, "dwc3", dwc->ev_buf);
  2374. if (ret) {
  2375. dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
  2376. irq, ret);
  2377. return ret;
  2378. }
  2379. spin_lock_irqsave(&dwc->lock, flags);
  2380. dwc->gadget_driver = driver;
  2381. spin_unlock_irqrestore(&dwc->lock, flags);
  2382. return 0;
  2383. }
/*
 * __dwc3_gadget_stop - quiesce the gadget: mask device IRQs first, then
 * tear down both physical halves of endpoint 0.
 */
static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);
}
  2390. static int dwc3_gadget_stop(struct usb_gadget *g)
  2391. {
  2392. struct dwc3 *dwc = gadget_to_dwc(g);
  2393. unsigned long flags;
  2394. spin_lock_irqsave(&dwc->lock, flags);
  2395. dwc->gadget_driver = NULL;
  2396. dwc->max_cfg_eps = 0;
  2397. spin_unlock_irqrestore(&dwc->lock, flags);
  2398. free_irq(dwc->irq_gadget, dwc->ev_buf);
  2399. return 0;
  2400. }
  2401. static void dwc3_gadget_config_params(struct usb_gadget *g,
  2402. struct usb_dcd_config_params *params)
  2403. {
  2404. struct dwc3 *dwc = gadget_to_dwc(g);
  2405. params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
  2406. params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;
  2407. /* Recommended BESL */
  2408. if (!dwc->dis_enblslpm_quirk) {
  2409. /*
  2410. * If the recommended BESL baseline is 0 or if the BESL deep is
  2411. * less than 2, Microsoft's Windows 10 host usb stack will issue
  2412. * a usb reset immediately after it receives the extended BOS
  2413. * descriptor and the enumeration will fail. To maintain
  2414. * compatibility with the Windows' usb stack, let's set the
  2415. * recommended BESL baseline to 1 and clamp the BESL deep to be
  2416. * within 2 to 15.
  2417. */
  2418. params->besl_baseline = 1;
  2419. if (dwc->is_utmi_l1_suspend)
  2420. params->besl_deep =
  2421. clamp_t(u8, dwc->hird_threshold, 2, 15);
  2422. }
  2423. /* U1 Device exit Latency */
  2424. if (dwc->dis_u1_entry_quirk)
  2425. params->bU1devExitLat = 0;
  2426. else
  2427. params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;
  2428. /* U2 Device exit Latency */
  2429. if (dwc->dis_u2_entry_quirk)
  2430. params->bU2DevExitLat = 0;
  2431. else
  2432. params->bU2DevExitLat =
  2433. cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
  2434. }
  2435. static void dwc3_gadget_set_speed(struct usb_gadget *g,
  2436. enum usb_device_speed speed)
  2437. {
  2438. struct dwc3 *dwc = gadget_to_dwc(g);
  2439. unsigned long flags;
  2440. spin_lock_irqsave(&dwc->lock, flags);
  2441. dwc->gadget_max_speed = speed;
  2442. spin_unlock_irqrestore(&dwc->lock, flags);
  2443. }
  2444. static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
  2445. enum usb_ssp_rate rate)
  2446. {
  2447. struct dwc3 *dwc = gadget_to_dwc(g);
  2448. unsigned long flags;
  2449. spin_lock_irqsave(&dwc->lock, flags);
  2450. dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
  2451. dwc->gadget_ssp_rate = rate;
  2452. spin_unlock_irqrestore(&dwc->lock, flags);
  2453. }
  2454. static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
  2455. {
  2456. struct dwc3 *dwc = gadget_to_dwc(g);
  2457. union power_supply_propval val = {0};
  2458. int ret;
  2459. if (dwc->usb2_phy)
  2460. return usb_phy_set_power(dwc->usb2_phy, mA);
  2461. if (!dwc->usb_psy)
  2462. return -EOPNOTSUPP;
  2463. val.intval = 1000 * mA;
  2464. ret = power_supply_set_property(dwc->usb_psy, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
  2465. return ret;
  2466. }
  2467. /**
  2468. * dwc3_gadget_check_config - ensure dwc3 can support the USB configuration
  2469. * @g: pointer to the USB gadget
  2470. *
  2471. * Used to record the maximum number of endpoints being used in a USB composite
  2472. * device. (across all configurations) This is to be used in the calculation
  2473. * of the TXFIFO sizes when resizing internal memory for individual endpoints.
  2474. * It will help ensured that the resizing logic reserves enough space for at
  2475. * least one max packet.
  2476. */
  2477. static int dwc3_gadget_check_config(struct usb_gadget *g)
  2478. {
  2479. struct dwc3 *dwc = gadget_to_dwc(g);
  2480. struct usb_ep *ep;
  2481. int fifo_size = 0;
  2482. int ram1_depth;
  2483. int ep_num = 0;
  2484. if (!dwc->do_fifo_resize)
  2485. return 0;
  2486. list_for_each_entry(ep, &g->ep_list, ep_list) {
  2487. /* Only interested in the IN endpoints */
  2488. if (ep->claimed && (ep->address & USB_DIR_IN))
  2489. ep_num++;
  2490. }
  2491. if (ep_num <= dwc->max_cfg_eps)
  2492. return 0;
  2493. /* Update the max number of eps in the composition */
  2494. dwc->max_cfg_eps = ep_num;
  2495. fifo_size = dwc3_gadget_calc_tx_fifo_size(dwc, dwc->max_cfg_eps);
  2496. /* Based on the equation, increment by one for every ep */
  2497. fifo_size += dwc->max_cfg_eps;
  2498. /* Check if we can fit a single fifo per endpoint */
  2499. ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
  2500. if (fifo_size > ram1_depth)
  2501. return -ENOMEM;
  2502. return 0;
  2503. }
  2504. static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
  2505. {
  2506. struct dwc3 *dwc = gadget_to_dwc(g);
  2507. unsigned long flags;
  2508. spin_lock_irqsave(&dwc->lock, flags);
  2509. dwc->async_callbacks = enable;
  2510. spin_unlock_irqrestore(&dwc->lock, flags);
  2511. }
/* usb_gadget_ops table: entry points the UDC core uses to drive dwc3. */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
	.udc_set_speed		= dwc3_gadget_set_speed,
	.udc_set_ssp_rate	= dwc3_gadget_set_ssp_rate,
	.get_config_params	= dwc3_gadget_config_params,
	.vbus_draw		= dwc3_gadget_vbus_draw,
	.check_config		= dwc3_gadget_check_config,
	.udc_async_callbacks	= dwc3_gadget_async_callbacks,
};
  2526. /* -------------------------------------------------------------------------- */
  2527. static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
  2528. {
  2529. struct dwc3 *dwc = dep->dwc;
  2530. usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
  2531. dep->endpoint.maxburst = 1;
  2532. dep->endpoint.ops = &dwc3_gadget_ep0_ops;
  2533. if (!dep->direction)
  2534. dwc->gadget->ep0 = &dep->endpoint;
  2535. dep->endpoint.caps.type_control = true;
  2536. return 0;
  2537. }
  2538. static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
  2539. {
  2540. struct dwc3 *dwc = dep->dwc;
  2541. u32 mdwidth;
  2542. int size;
  2543. int maxpacket;
  2544. mdwidth = dwc3_mdwidth(dwc);
  2545. /* MDWIDTH is represented in bits, we need it in bytes */
  2546. mdwidth /= 8;
  2547. size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
  2548. if (DWC3_IP_IS(DWC3))
  2549. size = DWC3_GTXFIFOSIZ_TXFDEP(size);
  2550. else
  2551. size = DWC31_GTXFIFOSIZ_TXFDEP(size);
  2552. /*
  2553. * maxpacket size is determined as part of the following, after assuming
  2554. * a mult value of one maxpacket:
  2555. * DWC3 revision 280A and prior:
  2556. * fifo_size = mult * (max_packet / mdwidth) + 1;
  2557. * maxpacket = mdwidth * (fifo_size - 1);
  2558. *
  2559. * DWC3 revision 290A and onwards:
  2560. * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
  2561. * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth;
  2562. */
  2563. if (DWC3_VER_IS_PRIOR(DWC3, 290A))
  2564. maxpacket = mdwidth * (size - 1);
  2565. else
  2566. maxpacket = mdwidth * ((size - 1) - 1) - mdwidth;
  2567. /* Functionally, space for one max packet is sufficient */
  2568. size = min_t(int, maxpacket, 1024);
  2569. usb_ep_set_maxpacket_limit(&dep->endpoint, size);
  2570. dep->endpoint.max_streams = 16;
  2571. dep->endpoint.ops = &dwc3_gadget_ep_ops;
  2572. list_add_tail(&dep->endpoint.ep_list,
  2573. &dwc->gadget->ep_list);
  2574. dep->endpoint.caps.type_iso = true;
  2575. dep->endpoint.caps.type_bulk = true;
  2576. dep->endpoint.caps.type_int = true;
  2577. return dwc3_alloc_trb_pool(dep);
  2578. }
  2579. static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
  2580. {
  2581. struct dwc3 *dwc = dep->dwc;
  2582. u32 mdwidth;
  2583. int size;
  2584. mdwidth = dwc3_mdwidth(dwc);
  2585. /* MDWIDTH is represented in bits, convert to bytes */
  2586. mdwidth /= 8;
  2587. /* All OUT endpoints share a single RxFIFO space */
  2588. size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
  2589. if (DWC3_IP_IS(DWC3))
  2590. size = DWC3_GRXFIFOSIZ_RXFDEP(size);
  2591. else
  2592. size = DWC31_GRXFIFOSIZ_RXFDEP(size);
  2593. /* FIFO depth is in MDWDITH bytes */
  2594. size *= mdwidth;
  2595. /*
  2596. * To meet performance requirement, a minimum recommended RxFIFO size
  2597. * is defined as follow:
  2598. * RxFIFO size >= (3 x MaxPacketSize) +
  2599. * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
  2600. *
  2601. * Then calculate the max packet limit as below.
  2602. */
  2603. size -= (3 * 8) + 16;
  2604. if (size < 0)
  2605. size = 0;
  2606. else
  2607. size /= 3;
  2608. usb_ep_set_maxpacket_limit(&dep->endpoint, size);
  2609. dep->endpoint.max_streams = 16;
  2610. dep->endpoint.ops = &dwc3_gadget_ep_ops;
  2611. list_add_tail(&dep->endpoint.ep_list,
  2612. &dwc->gadget->ep_list);
  2613. dep->endpoint.caps.type_iso = true;
  2614. dep->endpoint.caps.type_bulk = true;
  2615. dep->endpoint.caps.type_int = true;
  2616. return dwc3_alloc_trb_pool(dep);
  2617. }
  2618. static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
  2619. {
  2620. struct dwc3_ep *dep;
  2621. bool direction = epnum & 1;
  2622. int ret;
  2623. u8 num = epnum >> 1;
  2624. dep = kzalloc(sizeof(*dep), GFP_KERNEL);
  2625. if (!dep)
  2626. return -ENOMEM;
  2627. dep->dwc = dwc;
  2628. dep->number = epnum;
  2629. dep->direction = direction;
  2630. dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
  2631. dwc->eps[epnum] = dep;
  2632. dep->combo_num = 0;
  2633. dep->start_cmd_status = 0;
  2634. snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
  2635. direction ? "in" : "out");
  2636. dep->endpoint.name = dep->name;
  2637. if (!(dep->number > 1)) {
  2638. dep->endpoint.desc = &dwc3_gadget_ep0_desc;
  2639. dep->endpoint.comp_desc = NULL;
  2640. }
  2641. if (num == 0)
  2642. ret = dwc3_gadget_init_control_endpoint(dep);
  2643. else if (direction)
  2644. ret = dwc3_gadget_init_in_endpoint(dep);
  2645. else
  2646. ret = dwc3_gadget_init_out_endpoint(dep);
  2647. if (ret)
  2648. return ret;
  2649. dep->endpoint.caps.dir_in = direction;
  2650. dep->endpoint.caps.dir_out = !direction;
  2651. INIT_LIST_HEAD(&dep->pending_list);
  2652. INIT_LIST_HEAD(&dep->started_list);
  2653. INIT_LIST_HEAD(&dep->cancelled_list);
  2654. dwc3_debugfs_create_endpoint_dir(dep);
  2655. return 0;
  2656. }
  2657. static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
  2658. {
  2659. u8 epnum;
  2660. INIT_LIST_HEAD(&dwc->gadget->ep_list);
  2661. for (epnum = 0; epnum < total; epnum++) {
  2662. int ret;
  2663. ret = dwc3_gadget_init_endpoint(dwc, epnum);
  2664. if (ret)
  2665. return ret;
  2666. }
  2667. return 0;
  2668. }
  2669. static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
  2670. {
  2671. struct dwc3_ep *dep;
  2672. u8 epnum;
  2673. for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
  2674. dep = dwc->eps[epnum];
  2675. if (!dep)
  2676. continue;
  2677. /*
  2678. * Physical endpoints 0 and 1 are special; they form the
  2679. * bi-directional USB endpoint 0.
  2680. *
  2681. * For those two physical endpoints, we don't allocate a TRB
  2682. * pool nor do we add them the endpoints list. Due to that, we
  2683. * shouldn't do these two operations otherwise we would end up
  2684. * with all sorts of bugs when removing dwc3.ko.
  2685. */
  2686. if (epnum != 0 && epnum != 1) {
  2687. dwc3_free_trb_pool(dep);
  2688. list_del(&dep->endpoint.ep_list);
  2689. }
  2690. dwc3_debugfs_remove_endpoint_dir(dep);
  2691. kfree(dep);
  2692. }
  2693. }
  2694. /* -------------------------------------------------------------------------- */
/*
 * dwc3_gadget_ep_reclaim_completed_trb - reclaim one completed TRB of
 * @req, advancing the dequeue pointer and accumulating the residual
 * byte count into req->remaining.
 *
 * @chain: nonzero when this TRB is part of a chained (sg) series.
 *
 * Returns 1 when the caller must stop reclaiming TRBs for this request
 * (request boundary, short packet, missed isoc, or hardware still owns
 * the TRB), 0 to continue with the next TRB.
 */
static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status, int chain)
{
	unsigned int count;

	dwc3_ep_inc_deq(dep);

	trace_dwc3_complete_trb(dep, trb);
	req->num_trbs--;

	/*
	 * If we're in the middle of series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * one where the CHN bit is zero). DWC3 will also avoid clearing
	 * the HWO bit and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	/*
	 * For isochronous transfers, the first TRB in a service interval must
	 * have the Isoc-First type. Track and report its interval frame number.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
		unsigned int frame_number;

		frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
		frame_number &= ~(dep->interval - 1);
		req->request.frame_number = frame_number;
	}

	/*
	 * We use bounce buffer for requests that needs extra TRB or OUT ZLP. If
	 * this TRB points to the bounce buffer address, it's a MPS alignment
	 * TRB. Don't add it to req->remaining calculation.
	 */
	if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) &&
	    trb->bph == upper_32_bits(dep->dwc->bounce_addr)) {
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		return 1;
	}

	count = trb->size & DWC3_TRB_SIZE_MASK;
	req->remaining += count;

	/* Hardware still owns the TRB; stop unless the ep is shutting down */
	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		return 1;

	if (event->status & DEPEVT_STATUS_SHORT && !chain)
		return 1;

	if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) &&
	    DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC)
		return 1;

	/* IOC or Last TRB marks the end of this request's series */
	if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
	    (trb->ctrl & DWC3_TRB_CTRL_LST))
		return 1;

	return 0;
}
  2750. static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
  2751. struct dwc3_request *req, const struct dwc3_event_depevt *event,
  2752. int status)
  2753. {
  2754. struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
  2755. struct scatterlist *sg = req->sg;
  2756. struct scatterlist *s;
  2757. unsigned int num_queued = req->num_queued_sgs;
  2758. unsigned int i;
  2759. int ret = 0;
  2760. for_each_sg(sg, s, num_queued, i) {
  2761. trb = &dep->trb_pool[dep->trb_dequeue];
  2762. req->sg = sg_next(s);
  2763. req->num_queued_sgs--;
  2764. ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
  2765. trb, event, status, true);
  2766. if (ret)
  2767. break;
  2768. }
  2769. return ret;
  2770. }
  2771. static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
  2772. struct dwc3_request *req, const struct dwc3_event_depevt *event,
  2773. int status)
  2774. {
  2775. struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
  2776. return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
  2777. event, status, false);
  2778. }
  2779. static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
  2780. {
  2781. return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
  2782. }
/*
 * dwc3_gadget_ep_cleanup_completed_request - reclaim @req's completed
 * TRBs, update the actual transferred byte count, and give the request
 * back to the gadget driver once it has fully completed.
 *
 * Returns the (nonzero) stop indication from TRB reclaim, or 0; the
 * caller uses it to decide whether to keep walking the started list.
 */
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event,
		struct dwc3_request *req, int status)
{
	int request_status;
	int ret;

	/* sg requests reclaim one TRB per queued entry; linear just one */
	if (req->request.num_mapped_sgs)
		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
				status);
	else
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);

	req->request.actual = req->request.length - req->remaining;

	if (!dwc3_gadget_ep_request_completed(req))
		goto out;

	/* Bounce-buffer (ZLP/MPS-alignment) TRB still needs reclaiming */
	if (req->needs_extra_trb) {
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);
		req->needs_extra_trb = false;
	}

	/*
	 * The event status only reflects the status of the TRB with IOC set.
	 * For the requests that don't set interrupt on completion, the driver
	 * needs to check and return the status of the completed TRBs associated
	 * with the request. Use the status of the last TRB of the request.
	 */
	if (req->request.no_interrupt) {
		struct dwc3_trb *trb;

		trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
		switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
		case DWC3_TRBSTS_MISSED_ISOC:
			/* Isoc endpoint only */
			request_status = -EXDEV;
			break;
		case DWC3_TRB_STS_XFER_IN_PROG:
			/* Applicable when End Transfer with ForceRM=0 */
		case DWC3_TRBSTS_SETUP_PENDING:
			/* Control endpoint only */
		case DWC3_TRBSTS_OK:
		default:
			request_status = 0;
			break;
		}
	} else {
		request_status = status;
	}

	dwc3_gadget_giveback(dep, req, request_status);

out:
	return ret;
}
  2833. static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
  2834. const struct dwc3_event_depevt *event, int status)
  2835. {
  2836. struct dwc3_request *req;
  2837. while (!list_empty(&dep->started_list)) {
  2838. int ret;
  2839. req = next_request(&dep->started_list);
  2840. ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
  2841. req, status);
  2842. if (ret)
  2843. break;
  2844. /*
  2845. * The endpoint is disabled, let the dwc3_remove_requests()
  2846. * handle the cleanup.
  2847. */
  2848. if (!dep->endpoint.desc)
  2849. break;
  2850. }
  2851. }
  2852. static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
  2853. {
  2854. struct dwc3_request *req;
  2855. struct dwc3 *dwc = dep->dwc;
  2856. if (!dep->endpoint.desc || !dwc->pullups_connected ||
  2857. !dwc->connected)
  2858. return false;
  2859. if (!list_empty(&dep->pending_list))
  2860. return true;
  2861. /*
  2862. * We only need to check the first entry of the started list. We can
  2863. * assume the completed requests are removed from the started list.
  2864. */
  2865. req = next_request(&dep->started_list);
  2866. if (!req)
  2867. return false;
  2868. return !dwc3_gadget_ep_request_completed(req);
  2869. }
/* Cache the frame number carried in the endpoint event's parameters. */
static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dep->frame_number = event->parameters;
}
/*
 * dwc3_gadget_endpoint_trbs_complete - common completion path: clean up
 * finished requests, then either stop an idle isoc transfer or kick the
 * next transfer if there is more to do.
 *
 * Returns true when no new transfer was started (the endpoint went idle
 * or could not be kicked), false when a new transfer got going.
 */
static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3 *dwc = dep->dwc;
	bool no_started_trb = true;

	dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);

	/* Defer any kicking until the pending End Transfer completes */
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
		goto out;

	/* Endpoint may have been disabled during request cleanup */
	if (!dep->endpoint.desc)
		return no_started_trb;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    list_empty(&dep->started_list) &&
	    (list_empty(&dep->pending_list) || status == -EXDEV))
		dwc3_stop_active_transfer(dep, true, true);
	else if (dwc3_gadget_ep_should_continue(dep))
		if (__dwc3_gadget_kick_transfer(dep) == 0)
			no_started_trb = false;

out:
	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 *
	 * NOTE: the loop below reuses and clobbers @dep to scan all
	 * endpoints; don't use it afterwards.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
		u32 reg;
		int i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			/* Keep U1/U2 masked while any transfer is in flight */
			if (!list_empty(&dep->started_list))
				return no_started_trb;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}

	return no_started_trb;
}
  2914. static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
  2915. const struct dwc3_event_depevt *event)
  2916. {
  2917. int status = 0;
  2918. if (!dep->endpoint.desc)
  2919. return;
  2920. if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
  2921. dwc3_gadget_endpoint_frame_from_event(dep, event);
  2922. if (event->status & DEPEVT_STATUS_BUSERR)
  2923. status = -ECONNRESET;
  2924. if (event->status & DEPEVT_STATUS_MISSED_ISOC)
  2925. status = -EXDEV;
  2926. dwc3_gadget_endpoint_trbs_complete(dep, event, status);
  2927. }
  2928. static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
  2929. const struct dwc3_event_depevt *event)
  2930. {
  2931. int status = 0;
  2932. dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
  2933. if (event->status & DEPEVT_STATUS_BUSERR)
  2934. status = -ECONNRESET;
  2935. if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
  2936. dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
  2937. }
/*
 * Handle an XferNotReady event: record the current frame number and, for
 * isochronous endpoints, attempt to (re)start the transfer.
 */
static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dwc3_gadget_endpoint_frame_from_event(dep, event);

	/*
	 * The XferNotReady event is generated only once before the endpoint
	 * starts. It will be generated again when END_TRANSFER command is
	 * issued. For some controller versions, the XferNotReady event may be
	 * generated while the END_TRANSFER command is still in process. Ignore
	 * it and wait for the next XferNotReady event after the command is
	 * completed.
	 */
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
		return;

	/* Return value intentionally ignored; a later event retriggers the start. */
	(void) __dwc3_gadget_start_isoc(dep);
}
  2954. static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
  2955. const struct dwc3_event_depevt *event)
  2956. {
  2957. u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);
  2958. if (cmd != DWC3_DEPCMD_ENDTRANSFER)
  2959. return;
  2960. /*
  2961. * The END_TRANSFER command will cause the controller to generate a
  2962. * NoStream Event, and it's not due to the host DP NoStream rejection.
  2963. * Ignore the next NoStream event.
  2964. */
  2965. if (dep->stream_capable)
  2966. dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
  2967. dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
  2968. dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
  2969. dwc3_gadget_ep_cleanup_cancelled_requests(dep);
  2970. if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
  2971. struct dwc3 *dwc = dep->dwc;
  2972. dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
  2973. if (dwc3_send_clear_stall_ep_cmd(dep)) {
  2974. struct usb_ep *ep0 = &dwc->eps[0]->endpoint;
  2975. dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name);
  2976. if (dwc->delayed_status)
  2977. __dwc3_gadget_ep0_set_halt(ep0, 1);
  2978. return;
  2979. }
  2980. dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
  2981. if (dwc->clear_stall_protocol == dep->number)
  2982. dwc3_ep0_send_delayed_status(dwc);
  2983. }
  2984. if ((dep->flags & DWC3_EP_DELAY_START) &&
  2985. !usb_endpoint_xfer_isoc(dep->endpoint.desc))
  2986. __dwc3_gadget_kick_transfer(dep);
  2987. dep->flags &= ~DWC3_EP_DELAY_START;
  2988. }
/*
 * dwc3_gadget_endpoint_stream_event - handle stream events on a bulk-stream ep
 * @dep: stream-capable endpoint the event arrived on
 * @event: the stream event (found / prime / nostream)
 *
 * Tracks host prime behavior and, for non-compliant hosts that never
 * re-prime after a NoStream rejection, forces the stream to restart.
 */
static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;

	if (event->status == DEPEVT_STREAMEVT_FOUND) {
		dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
		goto out;
	}

	/* Note: NoStream rejection event param value is 0 and not 0xFFFF */
	switch (event->parameters) {
	case DEPEVT_STREAM_PRIME:
		/*
		 * If the host can properly transition the endpoint state from
		 * idle to prime after a NoStream rejection, there's no need to
		 * force restarting the endpoint to reinitiate the stream. To
		 * simplify the check, assume the host follows the USB spec if
		 * it primed the endpoint more than once.
		 */
		if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
			if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
				dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
			else
				dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
		}

		break;
	case DEPEVT_STREAM_NOSTREAM:
		if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
		    !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
		    (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
		     !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
			break;

		/*
		 * If the host rejects a stream due to no active stream, by the
		 * USB and xHCI spec, the endpoint will be put back to idle
		 * state. When the host is ready (buffer added/updated), it will
		 * prime the endpoint to inform the usb device controller. This
		 * triggers the device controller to issue ERDY to restart the
		 * stream. However, some hosts don't follow this and keep the
		 * endpoint in the idle state. No prime will come despite host
		 * streams are updated, and the device controller will not be
		 * triggered to generate ERDY to move the next stream data. To
		 * workaround this and maintain compatibility with various
		 * hosts, force to reinitiate the stream until the host is ready
		 * instead of waiting for the host to prime the endpoint.
		 */
		if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
			unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;

			dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
		} else {
			/* Restart via End Transfer + delayed start instead. */
			dep->flags |= DWC3_EP_DELAY_START;
			dwc3_stop_active_transfer(dep, true, true);
			return;
		}
		break;
	}

out:
	dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
}
/*
 * dwc3_endpoint_interrupt - dispatch an endpoint event to its handler
 * @dwc: controller context
 * @event: the endpoint event pulled from the event buffer
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		/*
		 * A disabled non-control endpoint with no transfer started
		 * has nothing left to process.
		 */
		if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
			return;

		/* Handle only EPCMDCMPLT when EP disabled */
		if ((event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) &&
		    !(epnum <= 1 && event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE))
			return;
	}

	/* Physical endpoints 0 and 1 belong to ep0; use its state machine. */
	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_gadget_endpoint_transfer_in_progress(dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_gadget_endpoint_transfer_not_ready(dep, event);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_gadget_endpoint_command_complete(dep, event);
		break;
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_gadget_endpoint_transfer_complete(dep, event);
		break;
	case DWC3_DEPEVT_STREAMEVT:
		dwc3_gadget_endpoint_stream_event(dep, event);
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		/* No action required for FIFO events. */
		break;
	}
}
  3085. static void dwc3_disconnect_gadget(struct dwc3 *dwc)
  3086. {
  3087. if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
  3088. spin_unlock(&dwc->lock);
  3089. dwc->gadget_driver->disconnect(dwc->gadget);
  3090. spin_lock(&dwc->lock);
  3091. }
  3092. }
  3093. static void dwc3_suspend_gadget(struct dwc3 *dwc)
  3094. {
  3095. if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
  3096. spin_unlock(&dwc->lock);
  3097. dwc->gadget_driver->suspend(dwc->gadget);
  3098. spin_lock(&dwc->lock);
  3099. }
  3100. }
  3101. static void dwc3_resume_gadget(struct dwc3 *dwc)
  3102. {
  3103. if (dwc->async_callbacks && dwc->gadget_driver->resume) {
  3104. spin_unlock(&dwc->lock);
  3105. dwc->gadget_driver->resume(dwc->gadget);
  3106. spin_lock(&dwc->lock);
  3107. }
  3108. }
  3109. static void dwc3_reset_gadget(struct dwc3 *dwc)
  3110. {
  3111. if (!dwc->gadget_driver)
  3112. return;
  3113. if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
  3114. spin_unlock(&dwc->lock);
  3115. usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
  3116. spin_lock(&dwc->lock);
  3117. }
  3118. }
/**
 * dwc3_stop_active_transfer - issue an End Transfer command if permitted
 * @dep: the endpoint to stop
 * @force: forwarded to __dwc3_stop_active_transfer()
 * @interrupt: forwarded to __dwc3_stop_active_transfer(); when set, a
 *	command completion is expected and a pending delayed stop is honored
 *
 * May return without issuing the command (no transfer started, command
 * already pending, or the stop is deferred via DWC3_EP_DELAY_STOP).
 */
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
	bool interrupt)
{
	struct dwc3 *dwc = dep->dwc;

	/*
	 * Only issue End Transfer command to the control endpoint of a started
	 * Data Phase. Typically we should only do so in error cases such as
	 * invalid/unexpected direction as described in the control transfer
	 * flow of the programming guide.
	 */
	if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
		return;

	/* A stop was already deferred; don't issue another from interrupt. */
	if (interrupt && (dep->flags & DWC3_EP_DELAY_STOP))
		return;

	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
		return;

	/*
	 * If a Setup packet is received but yet to DMA out, the controller will
	 * not process the End Transfer command of any endpoint. Polling of its
	 * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
	 * timeout. Delay issuing the End Transfer command until the Setup TRB is
	 * prepared.
	 */
	if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
		dep->flags |= DWC3_EP_DELAY_STOP;
		return;
	}

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is issuing EndTransfer with
	 * CMDIOC bit set and delay kicking transfer until the
	 * EndTransfer command had completed.
	 *
	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
	 * supports a mode to work around the above limitation. The
	 * software can poll the CMDACT bit in the DEPCMD register
	 * after issuing a EndTransfer command. This mode is enabled
	 * by writing GUCTL2[14]. This polling is already done in the
	 * dwc3_send_gadget_ep_cmd() function so if the mode is
	 * enabled, the EndTransfer command will have completed upon
	 * returning from this function.
	 *
	 * This mode is NOT available on the DWC_usb31 IP. In this
	 * case, if the IOC bit is not set, then delay by 1ms
	 * after issuing the EndTransfer command. This allows for the
	 * controller to handle the command completely before DWC3
	 * remove requests attempts to unmap USB request buffers.
	 */
	__dwc3_stop_active_transfer(dep, force, interrupt);
}
  3179. static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
  3180. {
  3181. u32 epnum;
  3182. for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
  3183. struct dwc3_ep *dep;
  3184. int ret;
  3185. dep = dwc->eps[epnum];
  3186. if (!dep)
  3187. continue;
  3188. if (!(dep->flags & DWC3_EP_STALL))
  3189. continue;
  3190. dep->flags &= ~DWC3_EP_STALL;
  3191. ret = dwc3_send_clear_stall_ep_cmd(dep);
  3192. WARN_ON_ONCE(ret);
  3193. }
  3194. }
  3195. static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
  3196. {
  3197. int reg;
  3198. dwc->suspended = false;
  3199. dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
  3200. reg = dwc3_readl(dwc->regs, DWC3_DCTL);
  3201. reg &= ~DWC3_DCTL_INITU1ENA;
  3202. reg &= ~DWC3_DCTL_INITU2ENA;
  3203. dwc3_gadget_dctl_write_safe(dwc, reg);
  3204. dwc->connected = false;
  3205. dev_info(dwc->dev, "%s dwc->connected: %d\n", __func__, dwc->connected);
  3206. dwc3_disconnect_gadget(dwc);
  3207. dwc->gadget->speed = USB_SPEED_UNKNOWN;
  3208. dwc->setup_packet_pending = false;
  3209. usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
  3210. dwc3_ep0_reset_state(dwc);
  3211. /*
  3212. * Request PM idle to address condition where usage count is
  3213. * already decremented to zero, but waiting for the disconnect
  3214. * interrupt to set dwc->connected to FALSE.
  3215. */
  3216. pm_request_idle(dwc->dev);
  3217. }
/*
 * dwc3_gadget_reset_interrupt - handle the USB Reset device event
 * @dwc: controller context
 *
 * Stops active transfers, resets the gadget driver and ep0 state, clears
 * halted endpoints and test mode, and zeroes the device address.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dwc->suspended = false;

	/*
	 * Ideally, dwc3_reset_gadget() would trigger the function
	 * drivers to stop any active transfers through ep disable.
	 * However, for functions which defer ep disable, such as mass
	 * storage, we will need to rely on the call to stop active
	 * transfers here, and avoid allowing of request queuing.
	 */
	dwc->connected = false;
	dev_info(dwc->dev, "%s dwc->connected: %d\n", __func__, dwc->connected);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	/*
	 * From SNPS databook section 8.1.2, the EP0 should be in setup
	 * phase. So ensure that EP0 is in setup phase by issuing a stall
	 * and restart if EP0 is not in setup phase.
	 */
	dwc3_ep0_reset_state(dwc);

	/*
	 * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
	 * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
	 * needs to ensure that it sends "a DEPENDXFER command for any active
	 * transfers."
	 */
	dwc3_stop_active_transfers(dwc);
	dwc->connected = true;
	dev_info(dwc->dev, "%s dwc->connected: %d\n", __func__, dwc->connected);

	/* Leave any host test mode that may have been entered. */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_gadget_dctl_write_safe(dwc, reg);
	dwc->test_mode = false;
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->link_state = DWC3_LINK_STATE_RESET;
}
/*
 * dwc3_gadget_conndone_interrupt - handle the Connect Done device event
 * @dwc: controller context
 *
 * Reads the negotiated speed from DSTS, programs ep0's max packet size and
 * the gadget speed/ssp_rate accordingly, configures USB2 LPM, and
 * re-enables both ep0 physical endpoints with the negotiated parameters.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 lanes = 1;
	u8 speed;

	if (!dwc->softconnect)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	/* Lane count is only reported by the DWC_usb32 IP. */
	if (DWC3_IP_IS(DWC32))
		lanes = DWC3_DSTS_CONNLANES(reg) + 1;

	dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 *
	 * Currently we always use the reset value. If any platform
	 * wants to set this to a different value, we need to add a
	 * setting and update GCTL.RAMCLKSEL here.
	 */
	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget->ep0->maxpacket = 512;
		dwc->gadget->speed = USB_SPEED_SUPER_PLUS;

		if (lanes > 1)
			dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
		else
			dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (DWC3_VER_IS_PRIOR(DWC3, 190A))
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget->ep0->maxpacket = 512;
		dwc->gadget->speed = USB_SPEED_SUPER;

		/* Two lanes at Gen1 is reported as SuperSpeed Plus 1x2. */
		if (lanes > 1) {
			dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
			dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
		}
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget->ep0->maxpacket = 64;
		dwc->gadget->speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget->ep0->maxpacket = 64;
		dwc->gadget->speed = USB_SPEED_FULL;
		break;
	}

	dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket;

	/* Enable USB2 LPM Capability */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
	    !dwc->usb2_gadget_lpm_disable &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
					    (dwc->is_utmi_l1_suspend << 4));

		/*
		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
		 * DCFG.LPMCap is set, core responses with an ACK and the
		 * BESL value in the LPM token is less than or equal to LPM
		 * NYET threshold.
		 */
		WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
			reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);

		dwc3_gadget_dctl_write_safe(dwc, reg);
	} else {
		/* LPM not usable here; make sure it is fully disabled. */
		if (dwc->usb2_gadget_lpm_disable) {
			reg = dwc3_readl(dwc->regs, DWC3_DCFG);
			reg &= ~DWC3_DCFG_LPM_CAP;
			dwc3_writel(dwc->regs, DWC3_DCFG, reg);
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_gadget_dctl_write_safe(dwc, reg);
	}

	/* Re-enable both ep0 physical endpoints with the negotiated maxpacket. */
	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
/*
 * dwc3_gadget_wakeup_interrupt - handle the Wakeup device event
 * @dwc: controller context
 *
 * Clears the suspended state, notifies the gadget driver's resume callback
 * (with dwc->lock dropped around the call), and records the RESUME link state.
 */
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	dwc->suspended = false;

	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */
	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(dwc->gadget);
		spin_lock(&dwc->lock);
	}

	dwc->link_state = DWC3_LINK_STATE_RESUME;
}
/*
 * dwc3_gadget_linksts_change_interrupt - handle a Link State Change event
 * @dwc: controller context
 * @evtinfo: event info field carrying the new link state
 *
 * Applies two revision-specific workarounds, then notifies the gadget
 * driver of suspend/resume transitions and records the new link state.
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
	    (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
		    (next == DWC3_LINK_STATE_RESUME)) {
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* Save the bits only once; restored in trbs_complete. */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_gadget_dctl_write_safe(dwc, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		/* U1 only implies suspend at SuperSpeed. */
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}
  3513. static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
  3514. unsigned int evtinfo)
  3515. {
  3516. enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
  3517. if (!dwc->suspended && next == DWC3_LINK_STATE_U3) {
  3518. dwc->suspended = true;
  3519. dwc3_suspend_gadget(dwc);
  3520. }
  3521. dwc->link_state = next;
  3522. }
/*
 * dwc3_gadget_interrupt - dispatch a device event to its handler
 * @dwc: controller context
 * @event: the device-specific event from the event buffer
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		/* Hibernation is not supported by this driver. */
		dev_WARN_ONCE(dwc->dev, true, "unexpected hibernation event\n");
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_SUSPEND:
		/* It changed to be suspend event for version 2.30a and above */
		if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
			dwc3_gadget_suspend_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_SOF:
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
	case DWC3_DEVICE_EVENT_CMD_CMPL:
	case DWC3_DEVICE_EVENT_OVERFLOW:
		/* Known events we deliberately ignore. */
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
  3559. static void dwc3_process_event_entry(struct dwc3 *dwc,
  3560. const union dwc3_event *event)
  3561. {
  3562. trace_dwc3_event(event->raw, dwc);
  3563. if (!event->type.is_devspec)
  3564. dwc3_endpoint_interrupt(dwc, &event->depevt);
  3565. else if (event->type.type == DWC3_EVENT_TYPE_DEV)
  3566. dwc3_gadget_interrupt(dwc, &event->devt);
  3567. else
  3568. dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
  3569. }
/*
 * dwc3_process_event_buf - drain cached events and unmask the interrupt
 * @evt: event buffer whose cached entries should be processed
 *
 * Called from the threaded handler with dwc->lock held.  Returns
 * IRQ_HANDLED after consuming all cached events, or IRQ_NONE if no events
 * were pending.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;

	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->cache + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes which is a regular entry
		 * followed by 8 bytes data. ATM I don't know how
		 * things are organized if we get next to the a
		 * boundary so I worry about that once we try to handle
		 * that.
		 */
		evt->lpos = (evt->lpos + 4) % evt->length;
		left -= 4;
	}

	evt->count = 0;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
		    DWC3_GEVNTSIZ_SIZE(evt->length));

	/* Re-arm interrupt moderation if the platform configured it. */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
	}

	/* Keep the clearing of DWC3_EVENT_PENDING at the end */
	evt->flags &= ~DWC3_EVENT_PENDING;

	return ret;
}
  3607. static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
  3608. {
  3609. struct dwc3_event_buffer *evt = _evt;
  3610. struct dwc3 *dwc = evt->dwc;
  3611. unsigned long flags;
  3612. irqreturn_t ret = IRQ_NONE;
  3613. local_bh_disable();
  3614. spin_lock_irqsave(&dwc->lock, flags);
  3615. ret = dwc3_process_event_buf(evt);
  3616. spin_unlock_irqrestore(&dwc->lock, flags);
  3617. local_bh_enable();
  3618. return ret;
  3619. }
/*
 * dwc3_check_event_buf - top-half event caching
 * @evt: event buffer for interrupt endpoint 0
 *
 * Copies pending events from the hardware event buffer into the local
 * cache, masks the interrupt, and acknowledges the hardware count.
 * Returns IRQ_WAKE_THREAD to hand off to dwc3_thread_interrupt(),
 * IRQ_HANDLED when nothing is to be done here, or IRQ_NONE when no
 * events are pending.
 */
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	u32 amount;
	u32 count;

	if (pm_runtime_suspended(dwc->dev)) {
		dwc->pending_events = true;
		/*
		 * Trigger runtime resume. The get() function will be balanced
		 * after processing the pending events in dwc3_process_pending
		 * events().
		 */
		pm_runtime_get(dwc->dev);
		disable_irq_nosync(dwc->irq_gadget);
		return IRQ_HANDLED;
	}

	/*
	 * With PCIe legacy interrupt, test shows that top-half irq handler can
	 * be called again after HW interrupt deassertion. Check if bottom-half
	 * irq event handler completes before caching new event to prevent
	 * losing events.
	 */
	if (evt->flags & DWC3_EVENT_PENDING)
		return IRQ_HANDLED;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
		    DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(evt->length));

	/* Copy in up to two chunks to handle ring-buffer wrap-around. */
	amount = min(count, evt->length - evt->lpos);
	memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);

	if (amount < count)
		memcpy(evt->cache, evt->buf, count - amount);

	/* Acknowledge the consumed events to the controller. */
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);

	return IRQ_WAKE_THREAD;
}
/*
 * dwc3_interrupt - top-half (hard) IRQ handler
 *
 * Delegates to dwc3_check_event_buf(), which caches pending events and
 * masks the interrupt; real processing happens in dwc3_thread_interrupt().
 */
static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;

	return dwc3_check_event_buf(evt);
}
  3665. static int dwc3_gadget_get_irq(struct dwc3 *dwc)
  3666. {
  3667. struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
  3668. int irq;
  3669. irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
  3670. if (irq > 0)
  3671. goto out;
  3672. if (irq == -EPROBE_DEFER)
  3673. goto out;
  3674. irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
  3675. if (irq > 0)
  3676. goto out;
  3677. if (irq == -EPROBE_DEFER)
  3678. goto out;
  3679. irq = platform_get_irq(dwc3_pdev, 0);
  3680. if (irq > 0)
  3681. goto out;
  3682. if (!irq)
  3683. irq = -EINVAL;
  3684. out:
  3685. return irq;
  3686. }
/*
 * dwc_gadget_release - free the dynamically allocated usb_gadget
 * @dev: the gadget's embedded struct device
 *
 * Release callback registered via usb_initialize_gadget(); runs when the
 * last reference to the gadget's device is dropped.
 */
static void dwc_gadget_release(struct device *dev)
{
	struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);

	kfree(gadget);
}
/**
 * dwc3_gadget_init - initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Allocates the ep0 TRBs, the setup buffer and the bounce buffer,
 * creates and registers the usb_gadget, and initializes the endpoint
 * list.  On any failure all previously acquired resources are released
 * via the err0..err5 unwind labels, in reverse order of acquisition.
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;
	int irq;
	struct device *dev;

	irq = dwc3_gadget_get_irq(dwc);
	if (irq < 0) {
		ret = irq;
		goto err0;
	}

	dwc->irq_gadget = irq;

	/* ep0 needs two TRBs (control transfers use a pair). */
	dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
					  sizeof(*dwc->ep0_trb) * 2,
					  &dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err1;
	}

	dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
					 &dwc->bounce_addr, GFP_KERNEL);
	if (!dwc->bounce) {
		ret = -ENOMEM;
		goto err2;
	}

	init_completion(&dwc->ep0_in_setup);

	dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL);
	if (!dwc->gadget) {
		ret = -ENOMEM;
		goto err3;
	}

	/*
	 * From here on the gadget is refcounted; dwc_gadget_release()
	 * will kfree() it when the last reference is dropped.
	 */
	usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
	dev				= &dwc->gadget->dev;
	dev->platform_data		= dwc;
	dwc->gadget->ops		= &dwc3_gadget_ops;
	dwc->gadget->speed		= USB_SPEED_UNKNOWN;
	dwc->gadget->ssp_rate		= USB_SSP_GEN_UNKNOWN;
	dwc->gadget->sg_supported	= true;
	dwc->gadget->name		= "dwc3-gadget";
	dwc->gadget->lpm_capable	= !dwc->usb2_gadget_lpm_disable;

	/*
	 * FIXME We might be setting max_speed to <SUPER, however versions
	 * <2.20a of dwc3 have an issue with metastability (documented
	 * elsewhere in this driver) which tells us we can't set max speed to
	 * anything lower than SUPER.
	 *
	 * Because gadget.max_speed is only used by composite.c and function
	 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
	 * to happen so we avoid sending SuperSpeed Capability descriptor
	 * together with our BOS descriptor as that could confuse host into
	 * thinking we can handle super speed.
	 *
	 * Note that, in fact, we won't even support GetBOS requests when speed
	 * is less than super speed because we don't have means, yet, to tell
	 * composite.c that we are USB 2.0 + LPM ECN.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
	    !dwc->dis_metastability_quirk)
		dev_info(dwc->dev, "changing max_speed on rev %08x\n",
			 dwc->revision);

	dwc->gadget->max_speed		= dwc->maximum_speed;
	dwc->gadget->max_ssp_rate	= dwc->max_ssp_rate;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
	if (ret)
		goto err4;

	ret = usb_add_gadget(dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to add gadget\n");
		goto err5;
	}

	if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
		dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
	else
		dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);

	return 0;

	/* Unwind in strict reverse order of the allocations above. */
err5:
	dwc3_gadget_free_endpoints(dwc);
err4:
	/* Drops the last ref; dwc_gadget_release() frees dwc->gadget. */
	usb_put_gadget(dwc->gadget);
	dwc->gadget = NULL;
err3:
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);
err2:
	kfree(dwc->setup_buf);
err1:
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);
err0:
	return ret;
}
/* -------------------------------------------------------------------------- */

/**
 * dwc3_gadget_exit - tear down everything dwc3_gadget_init() set up
 * @dwc: pointer to our controller context structure
 *
 * Releases resources in the reverse order of their acquisition in
 * dwc3_gadget_init().  Safe to call when gadget init never ran or
 * failed early (dwc->gadget is NULL in that case).
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	if (!dwc->gadget)
		return;

	/* Unregister first so no new gadget activity can start. */
	usb_del_gadget(dwc->gadget);
	dwc3_gadget_free_endpoints(dwc);
	/* Drops the last ref; dwc_gadget_release() frees dwc->gadget. */
	usb_put_gadget(dwc->gadget);
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);
	kfree(dwc->setup_buf);
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);
}
/**
 * dwc3_gadget_suspend - quiesce the gadget for system/runtime suspend
 * @dwc: pointer to our controller context structure
 *
 * Soft-disconnects the controller, then notifies the bound gadget
 * driver (if any) of the disconnect under the controller lock.  If the
 * soft disconnect fails, a soft connect is attempted so the link is
 * not left half torn down.
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	unsigned long flags;
	int ret;

	ret = dwc3_gadget_soft_disconnect(dwc);
	if (ret)
		goto err;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->gadget_driver)
		dwc3_disconnect_gadget(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err:
	/*
	 * Attempt to reset the controller's state. Likely no
	 * communication can be established until the host
	 * performs a port reset.
	 */
	if (dwc->softconnect)
		dwc3_gadget_soft_connect(dwc);

	return ret;
}
  3834. int dwc3_gadget_resume(struct dwc3 *dwc)
  3835. {
  3836. if (!dwc->gadget_driver || !dwc->softconnect)
  3837. return 0;
  3838. return dwc3_gadget_soft_connect(dwc);
  3839. }
  3840. void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
  3841. {
  3842. if (dwc->pending_events) {
  3843. dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
  3844. dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
  3845. pm_runtime_put(dwc->dev);
  3846. dwc->pending_events = false;
  3847. enable_irq(dwc->irq_gadget);
  3848. }
  3849. }