  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
  4. *
  5. * 2013 (c) Aeroflex Gaisler AB
  6. *
  7. * This driver supports GRUSBDC USB Device Controller cores available in the
  8. * GRLIB VHDL IP core library.
  9. *
  10. * Full documentation of the GRUSBDC core can be found here:
  11. * https://www.gaisler.com/products/grlib/grip.pdf
  12. *
  13. * Contributors:
  14. * - Andreas Larsson <[email protected]>
  15. * - Marko Isomaki
  16. */
  17. /*
  18. * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
  19. * individually configurable to any of the four USB transfer types. This driver
  20. * only supports cores in DMA mode.
  21. */
  22. #include <linux/kernel.h>
  23. #include <linux/module.h>
  24. #include <linux/slab.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/errno.h>
  27. #include <linux/list.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/device.h>
  30. #include <linux/usb.h>
  31. #include <linux/usb/ch9.h>
  32. #include <linux/usb/gadget.h>
  33. #include <linux/dma-mapping.h>
  34. #include <linux/dmapool.h>
  35. #include <linux/debugfs.h>
  36. #include <linux/seq_file.h>
  37. #include <linux/of_platform.h>
  38. #include <linux/of_irq.h>
  39. #include <linux/of_address.h>
  40. #include <asm/byteorder.h>
  41. #include "gr_udc.h"
  42. #define DRIVER_NAME "gr_udc"
  43. #define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"
  44. static const char driver_name[] = DRIVER_NAME;
  45. #define gr_read32(x) (ioread32be((x)))
  46. #define gr_write32(x, v) (iowrite32be((v), (x)))
  47. /* USB speed and corresponding string calculated from status register value */
  48. #define GR_SPEED(status) \
  49. ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
  50. #define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))
  51. /* Size of hardware buffer calculated from epctrl register value */
  52. #define GR_BUFFER_SIZE(epctrl) \
  53. ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
  54. GR_EPCTRL_BUFSZ_SCALER)
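/*
 * Illustrative reading of the macro above: if the BUFSZ field extracted from
 * epctrl holds the value n, the endpoint's hardware buffer is
 * n * GR_EPCTRL_BUFSZ_SCALER bytes. gr_ep_enable() below uses this to check
 * that (nt + 1) * max payload fits in the hardware buffer.
 */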
  55. /* ---------------------------------------------------------------------- */
  56. /* Debug printout functionality */
  57. static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};
  58. static const char *gr_ep0state_string(enum gr_ep0state state)
  59. {
  60. static const char *const names[] = {
  61. [GR_EP0_DISCONNECT] = "disconnect",
  62. [GR_EP0_SETUP] = "setup",
  63. [GR_EP0_IDATA] = "idata",
  64. [GR_EP0_ODATA] = "odata",
  65. [GR_EP0_ISTATUS] = "istatus",
  66. [GR_EP0_OSTATUS] = "ostatus",
  67. [GR_EP0_STALL] = "stall",
  68. [GR_EP0_SUSPEND] = "suspend",
  69. };
  70. if (state < 0 || state >= ARRAY_SIZE(names))
  71. return "UNKNOWN";
  72. return names[state];
  73. }
  74. #ifdef VERBOSE_DEBUG
  75. static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
  76. struct gr_request *req)
  77. {
  78. int buflen = ep->is_in ? req->req.length : req->req.actual;
  79. int rowlen = 32;
  80. int plen = min(rowlen, buflen);
  81. dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
  82. (buflen > plen ? " (truncated)" : ""));
  83. print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
  84. rowlen, 4, req->req.buf, plen, false);
  85. }
  86. static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
  87. u16 value, u16 index, u16 length)
  88. {
  89. dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
  90. type, request, value, index, length);
  91. }
  92. #else /* !VERBOSE_DEBUG */
  93. static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
  94. struct gr_request *req) {}
  95. static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
  96. u16 value, u16 index, u16 length) {}
  97. #endif /* VERBOSE_DEBUG */
  98. /* ---------------------------------------------------------------------- */
  99. /* Debugfs functionality */
  100. #ifdef CONFIG_USB_GADGET_DEBUG_FS
  101. static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
  102. {
  103. u32 epctrl = gr_read32(&ep->regs->epctrl);
  104. u32 epstat = gr_read32(&ep->regs->epstat);
  105. int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
  106. struct gr_request *req;
  107. seq_printf(seq, "%s:\n", ep->ep.name);
  108. seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
  109. seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
  110. seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
  111. seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
  112. seq_printf(seq, " dma_start = %d\n", ep->dma_start);
  113. seq_printf(seq, " stopped = %d\n", ep->stopped);
  114. seq_printf(seq, " wedged = %d\n", ep->wedged);
  115. seq_printf(seq, " callback = %d\n", ep->callback);
  116. seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
  117. seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
  118. seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
  119. if (mode == 1 || mode == 3)
  120. seq_printf(seq, " nt = %d\n",
  121. (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
  122. seq_printf(seq, " Buffer 0: %s %s%d\n",
  123. epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
  124. epstat & GR_EPSTAT_BS ? " " : "selected ",
  125. (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
  126. seq_printf(seq, " Buffer 1: %s %s%d\n",
  127. epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
  128. epstat & GR_EPSTAT_BS ? "selected " : " ",
  129. (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);
  130. if (list_empty(&ep->queue)) {
  131. seq_puts(seq, " Queue: empty\n\n");
  132. return;
  133. }
  134. seq_puts(seq, " Queue:\n");
  135. list_for_each_entry(req, &ep->queue, queue) {
  136. struct gr_dma_desc *desc;
  137. struct gr_dma_desc *next;
  138. seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
  139. &req->req.buf, req->req.actual, req->req.length);
  140. next = req->first_desc;
  141. do {
  142. desc = next;
  143. next = desc->next_desc;
  144. seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
  145. desc == req->curr_desc ? 'c' : ' ',
  146. desc, desc->paddr, desc->ctrl, desc->data);
  147. } while (desc != req->last_desc);
  148. }
  149. seq_puts(seq, "\n");
  150. }
  151. static int gr_dfs_show(struct seq_file *seq, void *v)
  152. {
  153. struct gr_udc *dev = seq->private;
  154. u32 control = gr_read32(&dev->regs->control);
  155. u32 status = gr_read32(&dev->regs->status);
  156. struct gr_ep *ep;
  157. seq_printf(seq, "usb state = %s\n",
  158. usb_state_string(dev->gadget.state));
  159. seq_printf(seq, "address = %d\n",
  160. (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
  161. seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
  162. seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
  163. seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
  164. seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
  165. seq_printf(seq, "test_mode = %d\n", dev->test_mode);
  166. seq_puts(seq, "\n");
  167. list_for_each_entry(ep, &dev->ep_list, ep_list)
  168. gr_seq_ep_show(seq, ep);
  169. return 0;
  170. }
  171. DEFINE_SHOW_ATTRIBUTE(gr_dfs);
  172. static void gr_dfs_create(struct gr_udc *dev)
  173. {
  174. const char *name = "gr_udc_state";
  175. struct dentry *root;
  176. root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
  177. debugfs_create_file(name, 0444, root, dev, &gr_dfs_fops);
  178. }
  179. static void gr_dfs_delete(struct gr_udc *dev)
  180. {
  181. debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
  182. }
  183. #else /* !CONFIG_USB_GADGET_DEBUG_FS */
  184. static void gr_dfs_create(struct gr_udc *dev) {}
  185. static void gr_dfs_delete(struct gr_udc *dev) {}
  186. #endif /* CONFIG_USB_GADGET_DEBUG_FS */
  187. /* ---------------------------------------------------------------------- */
  188. /* DMA and request handling */
  189. /* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
  190. static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
  191. {
  192. dma_addr_t paddr;
  193. struct gr_dma_desc *dma_desc;
  194. dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
  195. if (!dma_desc) {
  196. dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
  197. return NULL;
  198. }
  199. dma_desc->paddr = paddr;
  200. return dma_desc;
  201. }
  202. static inline void gr_free_dma_desc(struct gr_udc *dev,
  203. struct gr_dma_desc *desc)
  204. {
  205. dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
  206. }
  207. /* Frees the chain of struct gr_dma_desc for the given request */
  208. static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
  209. {
  210. struct gr_dma_desc *desc;
  211. struct gr_dma_desc *next;
  212. next = req->first_desc;
  213. if (!next)
  214. return;
  215. do {
  216. desc = next;
  217. next = desc->next_desc;
  218. gr_free_dma_desc(dev, desc);
  219. } while (desc != req->last_desc);
  220. req->first_desc = NULL;
  221. req->curr_desc = NULL;
  222. req->last_desc = NULL;
  223. }
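/*
 * Note: the do/while walk above is keyed on req->last_desc rather than on a
 * NULL next pointer, so it also handles a chain consisting of a single
 * descriptor (first_desc == last_desc) and does not rely on the link field
 * of the last descriptor being valid.
 */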
  224. static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
  225. /*
  226. * Frees allocated resources and calls the appropriate completion function/setup
  227. * packet handler for a finished request.
  228. *
  229. * Must be called with dev->lock held and irqs disabled.
  230. */
  231. static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
  232. int status)
  233. __releases(&dev->lock)
  234. __acquires(&dev->lock)
  235. {
  236. struct gr_udc *dev;
  237. list_del_init(&req->queue);
  238. if (likely(req->req.status == -EINPROGRESS))
  239. req->req.status = status;
  240. else
  241. status = req->req.status;
  242. dev = ep->dev;
  243. usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
  244. gr_free_dma_desc_chain(dev, req);
  245. if (ep->is_in) { /* For OUT, req->req.actual gets updated piece by piece */
  246. req->req.actual = req->req.length;
  247. } else if (req->oddlen && req->req.actual > req->evenlen) {
  248. /*
  249. * Copy to user buffer in this case where length was not evenly
  250. * divisible by ep->ep.maxpacket and the last descriptor was
  251. * actually used.
  252. */
  253. char *buftail = ((char *)req->req.buf + req->evenlen);
  254. memcpy(buftail, ep->tailbuf, req->oddlen);
  255. if (req->req.actual > req->req.length) {
  256. /* We got more data than was requested */
  257. dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
  258. ep->ep.name);
  259. gr_dbgprint_request("OVFL", ep, req);
  260. req->req.status = -EOVERFLOW;
  261. }
  262. }
  263. if (!status) {
  264. if (ep->is_in)
  265. gr_dbgprint_request("SENT", ep, req);
  266. else
  267. gr_dbgprint_request("RECV", ep, req);
  268. }
  269. /* Prevent changes to ep->queue during callback */
  270. ep->callback = 1;
  271. if (req == dev->ep0reqo && !status) {
  272. if (req->setup)
  273. gr_ep0_setup(dev, req);
  274. else
  275. dev_err(dev->dev,
  276. "Unexpected non setup packet on ep0in\n");
  277. } else if (req->req.complete) {
  278. spin_unlock(&dev->lock);
  279. usb_gadget_giveback_request(&ep->ep, &req->req);
  280. spin_lock(&dev->lock);
  281. }
  282. ep->callback = 0;
  283. }
  284. static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
  285. {
  286. struct gr_request *req;
  287. req = kzalloc(sizeof(*req), gfp_flags);
  288. if (!req)
  289. return NULL;
  290. INIT_LIST_HEAD(&req->queue);
  291. return &req->req;
  292. }
  293. /*
  294. * Starts DMA for endpoint ep if there are requests in the queue.
  295. *
  296. * Must be called with dev->lock held and with !ep->stopped.
  297. */
  298. static void gr_start_dma(struct gr_ep *ep)
  299. {
  300. struct gr_request *req;
  301. u32 dmactrl;
  302. if (list_empty(&ep->queue)) {
  303. ep->dma_start = 0;
  304. return;
  305. }
  306. req = list_first_entry(&ep->queue, struct gr_request, queue);
  307. /* A descriptor should already have been allocated */
  308. BUG_ON(!req->curr_desc);
  309. /*
  310. * The DMA controller cannot handle OUT buffers smaller than
  311. * ep->ep.maxpacket; that could lead to buffer overruns if an unexpectedly
  312. * long packet is received. Therefore an internal bounce buffer is
  313. * used when such a request is enabled.
  314. */
  315. if (!ep->is_in && req->oddlen)
  316. req->last_desc->data = ep->tailbuf_paddr;
  317. wmb(); /* Make sure all is settled before handing it over to DMA */
  318. /* Set the descriptor pointer in the hardware */
  319. gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
  320. /* Announce available descriptors */
  321. dmactrl = gr_read32(&ep->regs->dmactrl);
  322. gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);
  323. ep->dma_start = 1;
  324. }
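/*
 * The wmb() in gr_start_dma() orders the descriptor memory writes against
 * the dmaaddr/dmactrl register writes, so the DMA engine never observes a
 * half-initialized descriptor chain once GR_DMACTRL_DA is set.
 */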
  325. /*
  326. * Finishes the first request in the ep's queue and, if available, starts the
  327. * next request in queue.
  328. *
  329. * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
  330. */
  331. static void gr_dma_advance(struct gr_ep *ep, int status)
  332. {
  333. struct gr_request *req;
  334. req = list_first_entry(&ep->queue, struct gr_request, queue);
  335. gr_finish_request(ep, req, status);
  336. gr_start_dma(ep); /* Regardless of ep->dma_start */
  337. }
  338. /*
  339. * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
  340. * transfer to be canceled and clears GR_DMACTRL_DA.
  341. *
  342. * Must be called with dev->lock held.
  343. */
  344. static void gr_abort_dma(struct gr_ep *ep)
  345. {
  346. u32 dmactrl;
  347. dmactrl = gr_read32(&ep->regs->dmactrl);
  348. gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
  349. }
  350. /*
  351. * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
  352. * chain.
  353. *
  354. * Size is not used for OUT endpoints. The hardware cannot be instructed to
  355. * handle a buffer smaller than MAXPL in the OUT direction.
  356. */
  357. static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
  358. dma_addr_t data, unsigned size, gfp_t gfp_flags)
  359. {
  360. struct gr_dma_desc *desc;
  361. desc = gr_alloc_dma_desc(ep, gfp_flags);
  362. if (!desc)
  363. return -ENOMEM;
  364. desc->data = data;
  365. if (ep->is_in)
  366. desc->ctrl =
  367. (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
  368. else
  369. desc->ctrl = GR_DESC_OUT_CTRL_IE;
  370. if (!req->first_desc) {
  371. req->first_desc = desc;
  372. req->curr_desc = desc;
  373. } else {
  374. req->last_desc->next_desc = desc;
  375. req->last_desc->next = desc->paddr;
  376. req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
  377. }
  378. req->last_desc = desc;
  379. return 0;
  380. }
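/*
 * Sketch of the resulting request layout after three successful calls to
 * gr_add_dma_desc() (descriptors d0..d2 are illustrative):
 *
 *   req->first_desc -> d0 -> d1 -> d2 <- req->last_desc
 *   req->curr_desc  -> d0
 *
 * next_desc carries the CPU-side link, while desc->next holds the DMA-side
 * (paddr) link that the hardware follows.
 */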
  381. /*
  382. * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
  383. * together cover req->req.length bytes of the buffer at DMA address
  384. * req->req.dma for the OUT direction.
  385. *
  386. * The first descriptor in the chain is enabled, the rest disabled. The
  387. * interrupt handler will later enable them one by one when needed so we can
  388. * find out when the transfer is finished. For OUT endpoints, all descriptors
  389. * therefore generate interrupts.
  390. */
  391. static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
  392. gfp_t gfp_flags)
  393. {
  394. u16 bytes_left; /* Bytes left to provide descriptors for */
  395. u16 bytes_used; /* Bytes accommodated for */
  396. int ret = 0;
  397. req->first_desc = NULL; /* Signals that no allocation is done yet */
  398. bytes_left = req->req.length;
  399. bytes_used = 0;
  400. while (bytes_left > 0) {
  401. dma_addr_t start = req->req.dma + bytes_used;
  402. u16 size = min(bytes_left, ep->bytes_per_buffer);
  403. if (size < ep->bytes_per_buffer) {
  404. /* Prepare using bounce buffer */
  405. req->evenlen = req->req.length - bytes_left;
  406. req->oddlen = size;
  407. }
  408. ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
  409. if (ret)
  410. goto alloc_err;
  411. bytes_left -= size;
  412. bytes_used += size;
  413. }
  414. req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
  415. return 0;
  416. alloc_err:
  417. gr_free_dma_desc_chain(ep->dev, req);
  418. return ret;
  419. }
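/*
 * Worked example (illustrative numbers): with req->req.length == 1000 and
 * ep->bytes_per_buffer == 512, two descriptors are created: one for bytes
 * 0-511 and one for bytes 512-999. The second is smaller than
 * bytes_per_buffer, so req->evenlen = 512 and req->oddlen = 488 are set up
 * for the bounce buffer handling in gr_start_dma()/gr_finish_request().
 */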
  420. /*
  421. * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
  422. * together cover req->req.length bytes of the buffer at DMA address
  423. * req->req.dma for the IN direction.
  424. *
  425. * When more data is provided than the maximum payload size, the hardware splits
  426. * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
  427. * is always set to a multiple of the maximum payload (restricted to the valid
  428. * number of maximum payloads during high bandwidth isochronous or interrupt
  429. * transfers).
  430. *
  431. * All descriptors are enabled from the beginning and we only generate an
  432. * interrupt for the last one indicating that the entire request has been pushed
  433. * to hardware.
  434. */
  435. static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
  436. gfp_t gfp_flags)
  437. {
  438. u16 bytes_left; /* Bytes left in req to provide descriptors for */
  439. u16 bytes_used; /* Bytes in req accommodated for */
  440. int ret = 0;
  441. req->first_desc = NULL; /* Signals that no allocation is done yet */
  442. bytes_left = req->req.length;
  443. bytes_used = 0;
  444. do { /* Allow for zero length packets */
  445. dma_addr_t start = req->req.dma + bytes_used;
  446. u16 size = min(bytes_left, ep->bytes_per_buffer);
  447. ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
  448. if (ret)
  449. goto alloc_err;
  450. bytes_left -= size;
  451. bytes_used += size;
  452. } while (bytes_left > 0);
  453. /*
  454. * Send an extra zero length packet to indicate that no more data is
  455. * available when req->req.zero is set and the data length is an even
  456. * multiple of ep->ep.maxpacket.
  457. */
  458. if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
  459. ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
  460. if (ret)
  461. goto alloc_err;
  462. }
  463. /*
  464. * For IN packets we only want to know when the last packet has been
  465. * transmitted (not just put into internal buffers).
  466. */
  467. req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;
  468. return 0;
  469. alloc_err:
  470. gr_free_dma_desc_chain(ep->dev, req);
  471. return ret;
  472. }
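/*
 * Worked example (illustrative numbers): req->req.length == 1024,
 * ep->ep.maxpacket == 512, ep->bytes_per_buffer == 512 and req->req.zero set
 * yields three descriptors: 512 + 512 bytes of data plus a zero-length
 * descriptor, since 1024 is an even multiple of maxpacket. Only the last
 * descriptor gets GR_DESC_IN_CTRL_PI, so a single interrupt signals
 * completion of the whole request.
 */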
  473. /* Must be called with dev->lock held */
  474. static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
  475. {
  476. struct gr_udc *dev = ep->dev;
  477. int ret;
  478. if (unlikely(!ep->ep.desc && ep->num != 0)) {
  479. dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
  480. return -EINVAL;
  481. }
  482. if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
  483. dev_err(dev->dev,
  484. "Invalid request for %s: buf=%p list_empty=%d\n",
  485. ep->ep.name, req->req.buf, list_empty(&req->queue));
  486. return -EINVAL;
  487. }
  488. if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
  489. dev_err(dev->dev, "-ESHUTDOWN");
  490. return -ESHUTDOWN;
  491. }
  492. /* Can't touch registers when suspended */
  493. if (dev->ep0state == GR_EP0_SUSPEND) {
  494. dev_err(dev->dev, "-EBUSY");
  495. return -EBUSY;
  496. }
  497. /* Set up DMA mapping in case the caller didn't */
  498. ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
  499. if (ret) {
  500. dev_err(dev->dev, "usb_gadget_map_request");
  501. return ret;
  502. }
  503. if (ep->is_in)
  504. ret = gr_setup_in_desc_list(ep, req, gfp_flags);
  505. else
  506. ret = gr_setup_out_desc_list(ep, req, gfp_flags);
  507. if (ret)
  508. return ret;
  509. req->req.status = -EINPROGRESS;
  510. req->req.actual = 0;
  511. list_add_tail(&req->queue, &ep->queue);
  512. /* Start DMA if not started, otherwise interrupt handler handles it */
  513. if (!ep->dma_start && likely(!ep->stopped))
  514. gr_start_dma(ep);
  515. return 0;
  516. }
  517. /*
  518. * Queue a request from within the driver.
  519. *
  520. * Must be called with dev->lock held.
  521. */
  522. static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
  523. gfp_t gfp_flags)
  524. {
  525. if (ep->is_in)
  526. gr_dbgprint_request("RESP", ep, req);
  527. return gr_queue(ep, req, gfp_flags);
  528. }
  529. /* ---------------------------------------------------------------------- */
  530. /* General helper functions */
  531. /*
  532. * Dequeue ALL requests.
  533. *
  534. * Must be called with dev->lock held and irqs disabled.
  535. */
  536. static void gr_ep_nuke(struct gr_ep *ep)
  537. {
  538. struct gr_request *req;
  539. ep->stopped = 1;
  540. ep->dma_start = 0;
  541. gr_abort_dma(ep);
  542. while (!list_empty(&ep->queue)) {
  543. req = list_first_entry(&ep->queue, struct gr_request, queue);
  544. gr_finish_request(ep, req, -ESHUTDOWN);
  545. }
  546. }
  547. /*
  548. * Reset the hardware state of this endpoint.
  549. *
  550. * Must be called with dev->lock held.
  551. */
  552. static void gr_ep_reset(struct gr_ep *ep)
  553. {
  554. gr_write32(&ep->regs->epctrl, 0);
  555. gr_write32(&ep->regs->dmactrl, 0);
  556. ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
  557. ep->ep.desc = NULL;
  558. ep->stopped = 1;
  559. ep->dma_start = 0;
  560. }
  561. /*
  562. * Generate STALL on ep0in/out.
  563. *
  564. * Must be called with dev->lock held.
  565. */
  566. static void gr_control_stall(struct gr_udc *dev)
  567. {
  568. u32 epctrl;
  569. epctrl = gr_read32(&dev->epo[0].regs->epctrl);
  570. gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
  571. epctrl = gr_read32(&dev->epi[0].regs->epctrl);
  572. gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
  573. dev->ep0state = GR_EP0_STALL;
  574. }
  575. /*
  576. * Halts, halts and wedges, or clears halt for an endpoint.
  577. *
  578. * Must be called with dev->lock held.
  579. */
  580. static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
  581. {
  582. u32 epctrl;
  583. int retval = 0;
  584. if (ep->num && !ep->ep.desc)
  585. return -EINVAL;
  586. if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
  587. return -EOPNOTSUPP;
  588. /* Never actually halt ep0, and therefore never clear halt for ep0 */
  589. if (!ep->num) {
  590. if (halt && !fromhost) {
  591. /* ep0 halt from gadget - generate protocol stall */
  592. gr_control_stall(ep->dev);
  593. dev_dbg(ep->dev->dev, "EP: stall ep0\n");
  594. return 0;
  595. }
  596. return -EINVAL;
  597. }
  598. dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
  599. (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);
  600. epctrl = gr_read32(&ep->regs->epctrl);
  601. if (halt) {
  602. /* Set HALT */
  603. gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
  604. ep->stopped = 1;
  605. if (wedge)
  606. ep->wedged = 1;
  607. } else {
  608. gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
  609. ep->stopped = 0;
  610. ep->wedged = 0;
  611. /* Things might have been queued up in the meantime */
  612. if (!ep->dma_start)
  613. gr_start_dma(ep);
  614. }
  615. return retval;
  616. }
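/*
 * Note on wedging: a wedged endpoint stays halted until the gadget driver
 * clears it; a CLEAR_FEATURE(ENDPOINT_HALT) from the host is refused for
 * wedged endpoints in gr_endpoint_request() below.
 */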
  617. /* Must be called with dev->lock held */
  618. static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
  619. {
  620. if (dev->ep0state != value)
  621. dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
  622. gr_ep0state_string(value));
  623. dev->ep0state = value;
  624. }
  625. /*
  626. * Should only be called when endpoints cannot generate interrupts.
  627. *
  628. * Must be called with dev->lock held.
  629. */
  630. static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
  631. {
  632. gr_write32(&dev->regs->control, 0);
  633. wmb(); /* Make sure that we do not deny one of our interrupts */
  634. dev->irq_enabled = 0;
  635. }
  636. /*
  637. * Stop all device activity and disable data line pullup.
  638. *
  639. * Must be called with dev->lock held and irqs disabled.
  640. */
  641. static void gr_stop_activity(struct gr_udc *dev)
  642. {
  643. struct gr_ep *ep;
  644. list_for_each_entry(ep, &dev->ep_list, ep_list)
  645. gr_ep_nuke(ep);
  646. gr_disable_interrupts_and_pullup(dev);
  647. gr_set_ep0state(dev, GR_EP0_DISCONNECT);
  648. usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
  649. }
  650. /* ---------------------------------------------------------------------- */
  651. /* ep0 setup packet handling */
  652. static void gr_ep0_testmode_complete(struct usb_ep *_ep,
  653. struct usb_request *_req)
  654. {
  655. struct gr_ep *ep;
  656. struct gr_udc *dev;
  657. u32 control;
  658. ep = container_of(_ep, struct gr_ep, ep);
  659. dev = ep->dev;
  660. spin_lock(&dev->lock);
  661. control = gr_read32(&dev->regs->control);
  662. control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
  663. gr_write32(&dev->regs->control, control);
  664. spin_unlock(&dev->lock);
  665. }
  666. static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
  667. {
  668. /* Nothing needs to be done here */
  669. }
  670. /*
  671. * Queue a response on ep0in.
  672. *
  673. * Must be called with dev->lock held.
  674. */
  675. static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
  676. void (*complete)(struct usb_ep *ep,
  677. struct usb_request *req))
  678. {
  679. u8 *reqbuf = dev->ep0reqi->req.buf;
  680. int status;
  681. int i;
  682. for (i = 0; i < length; i++)
  683. reqbuf[i] = buf[i];
  684. dev->ep0reqi->req.length = length;
  685. dev->ep0reqi->req.complete = complete;
  686. status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
  687. if (status < 0)
  688. dev_err(dev->dev,
  689. "Could not queue ep0in setup response: %d\n", status);
  690. return status;
  691. }
  692. /*
  693. * Queue a 2 byte response on ep0in.
  694. *
  695. * Must be called with dev->lock held.
  696. */
  697. static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
  698. {
  699. __le16 le_response = cpu_to_le16(response);
  700. return gr_ep0_respond(dev, (u8 *)&le_response, 2,
  701. gr_ep0_dummy_complete);
  702. }
  703. /*
  704. * Queue a ZLP response on ep0in.
  705. *
  706. * Must be called with dev->lock held.
  707. */
  708. static inline int gr_ep0_respond_empty(struct gr_udc *dev)
  709. {
  710. return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
  711. }
  712. /*
  713. * This is run when a SET_ADDRESS request is received. It writes the new
  714. * address to the control register, which the hardware applies internally
  715. * when the next IN packet is ACKed.
  716. *
  717. * Must be called with dev->lock held.
  718. */
  719. static void gr_set_address(struct gr_udc *dev, u8 address)
  720. {
  721. u32 control;
  722. control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
  723. control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
  724. control |= GR_CONTROL_SU;
  725. gr_write32(&dev->regs->control, control);
  726. }
  727. /*
  728. * Returns negative for STALL, 0 for successful handling and positive for
  729. * delegation.
  730. *
  731. * Must be called with dev->lock held.
  732. */
  733. static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
  734. u16 value, u16 index)
  735. {
  736. u16 response;
  737. u8 test;
  738. switch (request) {
  739. case USB_REQ_SET_ADDRESS:
  740. dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
  741. gr_set_address(dev, value & 0xff);
  742. if (value)
  743. usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
  744. else
  745. usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
  746. return gr_ep0_respond_empty(dev);
  747. case USB_REQ_GET_STATUS:
  748. /* Self powered | remote wakeup */
  749. response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
  750. return gr_ep0_respond_u16(dev, response);
  751. case USB_REQ_SET_FEATURE:
  752. switch (value) {
  753. case USB_DEVICE_REMOTE_WAKEUP:
  754. /* Allow remote wakeup */
  755. dev->remote_wakeup = 1;
  756. return gr_ep0_respond_empty(dev);
  757. case USB_DEVICE_TEST_MODE:
  758. /* The hardware does not support USB_TEST_FORCE_ENABLE */
  759. test = index >> 8;
  760. if (test >= USB_TEST_J && test <= USB_TEST_PACKET) {
  761. dev->test_mode = test;
  762. return gr_ep0_respond(dev, NULL, 0,
  763. gr_ep0_testmode_complete);
  764. }
  765. }
  766. break;
  767. case USB_REQ_CLEAR_FEATURE:
  768. switch (value) {
  769. case USB_DEVICE_REMOTE_WAKEUP:
  770. /* Disallow remote wakeup */
  771. dev->remote_wakeup = 0;
  772. return gr_ep0_respond_empty(dev);
  773. }
  774. break;
  775. }
  776. return 1; /* Delegate the rest */
  777. }
  778. /*
  779. * Returns negative for STALL, 0 for successful handling and positive for
  780. * delegation.
  781. *
  782. * Must be called with dev->lock held.
  783. */
  784. static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
  785. u16 value, u16 index)
  786. {
  787. if (dev->gadget.state != USB_STATE_CONFIGURED)
  788. return -1;
  789. /*
  790. * Should return STALL for invalid interfaces, but udc driver does not
  791. * know anything about that. However, many gadget drivers do not handle
  792. * GET_STATUS so we need to take care of that.
  793. */
  794. switch (request) {
  795. case USB_REQ_GET_STATUS:
  796. return gr_ep0_respond_u16(dev, 0x0000);
  797. case USB_REQ_SET_FEATURE:
  798. case USB_REQ_CLEAR_FEATURE:
  799. /*
  800. * No possible valid standard requests. Still let gadget drivers
  801. * have a go at it.
  802. */
  803. break;
  804. }
  805. return 1; /* Delegate the rest */
  806. }
  807. /*
  808. * Returns negative for STALL, 0 for successful handling and positive for
  809. * delegation.
  810. *
  811. * Must be called with dev->lock held.
  812. */
  813. static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
  814. u16 value, u16 index)
  815. {
  816. struct gr_ep *ep;
  817. int status;
  818. int halted;
  819. u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
  820. u8 is_in = index & USB_ENDPOINT_DIR_MASK;
  821. if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
  822. return -1;
  823. if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
  824. return -1;
  825. ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);
  826. switch (request) {
  827. case USB_REQ_GET_STATUS:
  828. halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
  829. return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);
  830. case USB_REQ_SET_FEATURE:
  831. switch (value) {
  832. case USB_ENDPOINT_HALT:
  833. status = gr_ep_halt_wedge(ep, 1, 0, 1);
  834. if (status >= 0)
  835. status = gr_ep0_respond_empty(dev);
  836. return status;
  837. }
  838. break;
  839. case USB_REQ_CLEAR_FEATURE:
  840. switch (value) {
  841. case USB_ENDPOINT_HALT:
  842. if (ep->wedged)
  843. return -1;
  844. status = gr_ep_halt_wedge(ep, 0, 0, 1);
  845. if (status >= 0)
  846. status = gr_ep0_respond_empty(dev);
  847. return status;
  848. }
  849. break;
  850. }
  851. return 1; /* Delegate the rest */
  852. }
  853. /* Must be called with dev->lock held */
  854. static void gr_ep0out_requeue(struct gr_udc *dev)
  855. {
  856. int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);
  857. if (ret)
  858. dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
  859. ret);
  860. }
  861. /*
  862. * The main function dealing with setup requests on ep0.
  863. *
  864. * Must be called with dev->lock held and irqs disabled
  865. */
  866. static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
  867. __releases(&dev->lock)
  868. __acquires(&dev->lock)
  869. {
  870. union {
  871. struct usb_ctrlrequest ctrl;
  872. u8 raw[8];
  873. u32 word[2];
  874. } u;
  875. u8 type;
  876. u8 request;
  877. u16 value;
  878. u16 index;
  879. u16 length;
  880. int i;
  881. int status;
  882. /* Restore from ep0 halt */
  883. if (dev->ep0state == GR_EP0_STALL) {
  884. gr_set_ep0state(dev, GR_EP0_SETUP);
  885. if (!req->req.actual)
  886. goto out;
  887. }
  888. if (dev->ep0state == GR_EP0_ISTATUS) {
  889. gr_set_ep0state(dev, GR_EP0_SETUP);
  890. if (req->req.actual > 0)
  891. dev_dbg(dev->dev,
  892. "Unexpected setup packet at state %s\n",
  893. gr_ep0state_string(GR_EP0_ISTATUS));
  894. else
  895. goto out; /* Got expected ZLP */
  896. } else if (dev->ep0state != GR_EP0_SETUP) {
  897. dev_info(dev->dev,
  898. "Unexpected ep0out request at state %s - stalling\n",
  899. gr_ep0state_string(dev->ep0state));
  900. gr_control_stall(dev);
  901. gr_set_ep0state(dev, GR_EP0_SETUP);
  902. goto out;
  903. } else if (!req->req.actual) {
  904. dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
  905. gr_ep0state_string(dev->ep0state));
  906. goto out;
  907. }
  908. /* Handle SETUP packet */
  909. for (i = 0; i < req->req.actual; i++)
  910. u.raw[i] = ((u8 *)req->req.buf)[i];
  911. type = u.ctrl.bRequestType;
  912. request = u.ctrl.bRequest;
  913. value = le16_to_cpu(u.ctrl.wValue);
  914. index = le16_to_cpu(u.ctrl.wIndex);
  915. length = le16_to_cpu(u.ctrl.wLength);
  916. gr_dbgprint_devreq(dev, type, request, value, index, length);
  917. /* Check for data stage */
  918. if (length) {
  919. if (type & USB_DIR_IN)
  920. gr_set_ep0state(dev, GR_EP0_IDATA);
  921. else
  922. gr_set_ep0state(dev, GR_EP0_ODATA);
  923. }
  924. status = 1; /* Positive status flags delegation */
  925. if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
  926. switch (type & USB_RECIP_MASK) {
  927. case USB_RECIP_DEVICE:
  928. status = gr_device_request(dev, type, request,
  929. value, index);
  930. break;
  931. case USB_RECIP_ENDPOINT:
  932. status = gr_endpoint_request(dev, type, request,
  933. value, index);
  934. break;
  935. case USB_RECIP_INTERFACE:
  936. status = gr_interface_request(dev, type, request,
  937. value, index);
  938. break;
  939. }
  940. }
  941. if (status > 0) {
  942. spin_unlock(&dev->lock);
  943. dev_vdbg(dev->dev, "DELEGATE\n");
  944. status = dev->driver->setup(&dev->gadget, &u.ctrl);
  945. spin_lock(&dev->lock);
  946. }
  947. /* Generate STALL on both ep0out and ep0in if requested */
  948. if (unlikely(status < 0)) {
  949. dev_vdbg(dev->dev, "STALL\n");
  950. gr_control_stall(dev);
  951. }
  952. if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
  953. request == USB_REQ_SET_CONFIGURATION) {
  954. if (!value) {
  955. dev_dbg(dev->dev, "STATUS: deconfigured\n");
  956. usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
  957. } else if (status >= 0) {
  958. /* Not configured unless the gadget OKs it */
  959. dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
  960. usb_gadget_set_state(&dev->gadget,
  961. USB_STATE_CONFIGURED);
  962. }
  963. }
  964. /* Get ready for next stage */
  965. if (dev->ep0state == GR_EP0_ODATA)
  966. gr_set_ep0state(dev, GR_EP0_OSTATUS);
  967. else if (dev->ep0state == GR_EP0_IDATA)
  968. gr_set_ep0state(dev, GR_EP0_ISTATUS);
  969. else
  970. gr_set_ep0state(dev, GR_EP0_SETUP);
  971. out:
  972. gr_ep0out_requeue(dev);
  973. }
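/*
 * Summary of the ep0 state transitions driven by gr_ep0_setup() (a sketch
 * derived from the code above):
 *
 *   SETUP -> IDATA   (SETUP packet with wLength > 0, IN data stage)
 *   SETUP -> ODATA   (SETUP packet with wLength > 0, OUT data stage)
 *   IDATA -> ISTATUS, ODATA -> OSTATUS (after the request is handled)
 *   ISTATUS/OSTATUS -> SETUP (status stage completed)
 *   any -> STALL     (protocol stall on errors), cleared on the next SETUP
 */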
  974. /* ---------------------------------------------------------------------- */
  975. /* VBUS and USB reset handling */
  976. /* Must be called with dev->lock held and irqs disabled */
  977. static void gr_vbus_connected(struct gr_udc *dev, u32 status)
  978. {
  979. u32 control;
  980. dev->gadget.speed = GR_SPEED(status);
  981. usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);
  982. /* Turn on full interrupts and pullup */
  983. control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
  984. GR_CONTROL_SP | GR_CONTROL_EP);
  985. gr_write32(&dev->regs->control, control);
  986. }
  987. /* Must be called with dev->lock held */
  988. static void gr_enable_vbus_detect(struct gr_udc *dev)
  989. {
  990. u32 status;
  991. dev->irq_enabled = 1;
  992. wmb(); /* Make sure we do not ignore an interrupt */
  993. gr_write32(&dev->regs->control, GR_CONTROL_VI);
  994. /* Take care of the case where we are already plugged in at this point */
  995. status = gr_read32(&dev->regs->status);
  996. if (status & GR_STATUS_VB)
  997. gr_vbus_connected(dev, status);
  998. }
  999. /* Must be called with dev->lock held and irqs disabled */
  1000. static void gr_vbus_disconnected(struct gr_udc *dev)
  1001. {
  1002. gr_stop_activity(dev);
  1003. /* Report disconnect */
  1004. if (dev->driver && dev->driver->disconnect) {
  1005. spin_unlock(&dev->lock);
  1006. dev->driver->disconnect(&dev->gadget);
  1007. spin_lock(&dev->lock);
  1008. }
  1009. gr_enable_vbus_detect(dev);
  1010. }
  1011. /* Must be called with dev->lock held and irqs disabled */
  1012. static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
  1013. {
  1014. gr_set_address(dev, 0);
  1015. gr_set_ep0state(dev, GR_EP0_SETUP);
  1016. usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
  1017. dev->gadget.speed = GR_SPEED(status);
  1018. gr_ep_nuke(&dev->epo[0]);
  1019. gr_ep_nuke(&dev->epi[0]);
  1020. dev->epo[0].stopped = 0;
  1021. dev->epi[0].stopped = 0;
  1022. gr_ep0out_requeue(dev);
  1023. }
  1024. /* ---------------------------------------------------------------------- */
  1025. /* Irq handling */
  1026. /*
  1027. * Handles interrupts from IN endpoints. Returns whether something was handled.
  1028. *
  1029. * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
  1030. */
  1031. static int gr_handle_in_ep(struct gr_ep *ep)
  1032. {
  1033. struct gr_request *req;
  1034. req = list_first_entry(&ep->queue, struct gr_request, queue);
  1035. if (!req->last_desc)
  1036. return 0;
  1037. if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
  1038. return 0; /* Not put in hardware buffers yet */
  1039. if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
  1040. return 0; /* Not transmitted yet, still in hardware buffers */
  1041. /* Write complete */
  1042. gr_dma_advance(ep, 0);
  1043. return 1;
  1044. }
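/*
 * Completion of an IN request is thus detected in two steps: first the
 * hardware clears GR_DESC_IN_CTRL_EN in the last descriptor once the data
 * has been fetched, then the B0/B1 buffer-valid bits drop once the data has
 * actually been transmitted on the bus (the PI bit set in
 * gr_setup_in_desc_list() generates the interrupt for this).
 */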
  1045. /*
  1046. * Handles interrupts from OUT endpoints. Returns whether something was handled.
  1047. *
  1048. * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
  1049. */
  1050. static int gr_handle_out_ep(struct gr_ep *ep)
  1051. {
  1052. u32 ep_dmactrl;
  1053. u32 ctrl;
  1054. u16 len;
  1055. struct gr_request *req;
  1056. struct gr_udc *dev = ep->dev;
  1057. req = list_first_entry(&ep->queue, struct gr_request, queue);
  1058. if (!req->curr_desc)
  1059. return 0;
  1060. ctrl = READ_ONCE(req->curr_desc->ctrl);
  1061. if (ctrl & GR_DESC_OUT_CTRL_EN)
  1062. return 0; /* Not received yet */
  1063. /* Read complete */
  1064. len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
  1065. req->req.actual += len;
  1066. if (ctrl & GR_DESC_OUT_CTRL_SE)
  1067. req->setup = 1;
  1068. if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
  1069. /* Short packet or >= expected size - we are done */
  1070. if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
  1071. /*
  1072. * Send a status stage ZLP to ack the DATA stage in the
  1073. * OUT direction. This needs to be done before
  1074. * gr_dma_advance as that can lead to a call to
  1075. * ep0_setup that can change dev->ep0state.
  1076. */
  1077. gr_ep0_respond_empty(dev);
  1078. gr_set_ep0state(dev, GR_EP0_SETUP);
  1079. }
  1080. gr_dma_advance(ep, 0);
  1081. } else {
  1082. /* Not done yet. Enable the next descriptor to receive more. */
  1083. req->curr_desc = req->curr_desc->next_desc;
  1084. req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
  1085. ep_dmactrl = gr_read32(&ep->regs->dmactrl);
  1086. gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
  1087. }
  1088. return 1;
  1089. }
  1090. /*
  1091. * Handle state changes. Returns whether something was handled.
  1092. *
  1093. * Must be called with dev->lock held and irqs disabled.
  1094. */
  1095. static int gr_handle_state_changes(struct gr_udc *dev)
  1096. {
  1097. u32 status = gr_read32(&dev->regs->status);
  1098. int handled = 0;
  1099. int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
  1100. dev->gadget.state == USB_STATE_ATTACHED);
  1101. /* VBUS valid detected */
  1102. if (!powstate && (status & GR_STATUS_VB)) {
  1103. dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
  1104. gr_vbus_connected(dev, status);
  1105. handled = 1;
  1106. }
  1107. /* Disconnect */
  1108. if (powstate && !(status & GR_STATUS_VB)) {
  1109. dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
  1110. gr_vbus_disconnected(dev);
  1111. handled = 1;
  1112. }
  1113. /* USB reset detected */
  1114. if (status & GR_STATUS_UR) {
  1115. dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
  1116. GR_SPEED_STR(status));
  1117. gr_write32(&dev->regs->status, GR_STATUS_UR);
  1118. gr_udc_usbreset(dev, status);
  1119. handled = 1;
  1120. }
  1121. /* Speed change */
  1122. if (dev->gadget.speed != GR_SPEED(status)) {
  1123. dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
  1124. GR_SPEED_STR(status));
  1125. dev->gadget.speed = GR_SPEED(status);
  1126. handled = 1;
  1127. }
  1128. /* Going into suspend */
  1129. if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
  1130. dev_dbg(dev->dev, "STATUS: USB suspend\n");
  1131. gr_set_ep0state(dev, GR_EP0_SUSPEND);
  1132. dev->suspended_from = dev->gadget.state;
  1133. usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);
  1134. if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
  1135. dev->driver && dev->driver->suspend) {
  1136. spin_unlock(&dev->lock);
  1137. dev->driver->suspend(&dev->gadget);
  1138. spin_lock(&dev->lock);
  1139. }
  1140. handled = 1;
  1141. }
  1142. /* Coming out of suspend */
  1143. if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
  1144. dev_dbg(dev->dev, "STATUS: USB resume\n");
  1145. if (dev->suspended_from == USB_STATE_POWERED)
  1146. gr_set_ep0state(dev, GR_EP0_DISCONNECT);
  1147. else
  1148. gr_set_ep0state(dev, GR_EP0_SETUP);
  1149. usb_gadget_set_state(&dev->gadget, dev->suspended_from);
  1150. if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
  1151. dev->driver && dev->driver->resume) {
  1152. spin_unlock(&dev->lock);
  1153. dev->driver->resume(&dev->gadget);
  1154. spin_lock(&dev->lock);
  1155. }
  1156. handled = 1;
  1157. }
  1158. return handled;
  1159. }
  1160. /* Non-interrupt context irq handler */
  1161. static irqreturn_t gr_irq_handler(int irq, void *_dev)
  1162. {
  1163. struct gr_udc *dev = _dev;
  1164. struct gr_ep *ep;
  1165. int handled = 0;
  1166. int i;
  1167. unsigned long flags;
  1168. spin_lock_irqsave(&dev->lock, flags);
  1169. if (!dev->irq_enabled)
  1170. goto out;
  1171. /*
  1172. * Check IN ep interrupts. We check these before the OUT eps because
  1173. * some gadget drivers reuse a request that might currently be
  1174. * outstanding and needs to be completed first (mainly setup requests).
  1175. */
  1176. for (i = 0; i < dev->nepi; i++) {
  1177. ep = &dev->epi[i];
  1178. if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
  1179. handled = gr_handle_in_ep(ep) || handled;
  1180. }
  1181. /* Check OUT ep interrupts */
  1182. for (i = 0; i < dev->nepo; i++) {
  1183. ep = &dev->epo[i];
  1184. if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
  1185. handled = gr_handle_out_ep(ep) || handled;
  1186. }
  1187. /* Check status interrupts */
  1188. handled = gr_handle_state_changes(dev) || handled;
  1189. /*
  1190. * Check AMBA DMA errors. Only check if we didn't find anything else to
  1191. * handle because this shouldn't happen if we did everything right.
  1192. */
  1193. if (!handled) {
  1194. list_for_each_entry(ep, &dev->ep_list, ep_list) {
  1195. if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
  1196. dev_err(dev->dev,
  1197. "AMBA Error occurred for %s\n",
  1198. ep->ep.name);
  1199. handled = 1;
  1200. }
  1201. }
  1202. }
  1203. out:
  1204. spin_unlock_irqrestore(&dev->lock, flags);
  1205. return handled ? IRQ_HANDLED : IRQ_NONE;
  1206. }
  1207. /* Interrupt context irq handler */
  1208. static irqreturn_t gr_irq(int irq, void *_dev)
  1209. {
  1210. struct gr_udc *dev = _dev;
  1211. if (!dev->irq_enabled)
  1212. return IRQ_NONE;
  1213. return IRQ_WAKE_THREAD;
  1214. }
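/*
 * gr_irq() runs in hard interrupt context and merely checks that interrupts
 * are enabled; the real work is deferred to the threaded handler
 * gr_irq_handler() via IRQ_WAKE_THREAD. The two are presumably installed
 * together with request_threaded_irq() in the probe code, which lies
 * outside this excerpt.
 */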
  1215. /* ---------------------------------------------------------------------- */
  1216. /* USB ep ops */
  1217. /* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
  1218. static int gr_ep_enable(struct usb_ep *_ep,
  1219. const struct usb_endpoint_descriptor *desc)
  1220. {
  1221. struct gr_udc *dev;
  1222. struct gr_ep *ep;
  1223. u8 mode;
  1224. u8 nt;
  1225. u16 max;
  1226. u16 buffer_size = 0;
  1227. u32 epctrl;
  1228. ep = container_of(_ep, struct gr_ep, ep);
  1229. if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
  1230. return -EINVAL;
  1231. dev = ep->dev;
  1232. /* 'ep0' IN and OUT are reserved */
  1233. if (ep == &dev->epo[0] || ep == &dev->epi[0])
  1234. return -EINVAL;
  1235. if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
  1236. return -ESHUTDOWN;
  1237. /* Make sure we are clear for enabling */
  1238. epctrl = gr_read32(&ep->regs->epctrl);
  1239. if (epctrl & GR_EPCTRL_EV)
  1240. return -EBUSY;
  1241. /* Check that directions match */
  1242. if (!ep->is_in != !usb_endpoint_dir_in(desc))
  1243. return -EINVAL;
  1244. /* Check ep num */
  1245. if ((!ep->is_in && ep->num >= dev->nepo) ||
  1246. (ep->is_in && ep->num >= dev->nepi))
  1247. return -EINVAL;
  1248. if (usb_endpoint_xfer_control(desc)) {
  1249. mode = 0;
  1250. } else if (usb_endpoint_xfer_isoc(desc)) {
  1251. mode = 1;
  1252. } else if (usb_endpoint_xfer_bulk(desc)) {
  1253. mode = 2;
  1254. } else if (usb_endpoint_xfer_int(desc)) {
  1255. mode = 3;
  1256. } else {
  1257. dev_err(dev->dev, "Unknown transfer type for %s\n",
  1258. ep->ep.name);
  1259. return -EINVAL;
  1260. }
  1261. /*
  1262. * Bits 10-0 set the max payload. 12-11 set the number of
  1263. * additional transactions.
  1264. */
  1265. max = usb_endpoint_maxp(desc);
  1266. nt = usb_endpoint_maxp_mult(desc) - 1;
  1267. buffer_size = GR_BUFFER_SIZE(epctrl);
  1268. if (nt && (mode == 0 || mode == 2)) {
  1269. dev_err(dev->dev,
  1270. "%s mode: multiple trans./microframe not valid\n",
  1271. (mode == 2 ? "Bulk" : "Control"));
  1272. return -EINVAL;
  1273. } else if (nt == 0x3) {
  1274. dev_err(dev->dev,
  1275. "Invalid value 0x3 for additional trans./microframe\n");
  1276. return -EINVAL;
  1277. } else if ((nt + 1) * max > buffer_size) {
  1278. dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
  1279. buffer_size, (nt + 1), max);
  1280. return -EINVAL;
  1281. } else if (max == 0) {
  1282. dev_err(dev->dev, "Max payload cannot be set to 0\n");
  1283. return -EINVAL;
  1284. } else if (max > ep->ep.maxpacket_limit) {
  1285. dev_err(dev->dev, "Requested max payload %d > limit %d\n",
  1286. max, ep->ep.maxpacket_limit);
  1287. return -EINVAL;
  1288. }
  1289. spin_lock(&ep->dev->lock);
  1290. if (!ep->stopped) {
  1291. spin_unlock(&ep->dev->lock);
  1292. return -EBUSY;
  1293. }
  1294. ep->stopped = 0;
  1295. ep->wedged = 0;
  1296. ep->ep.desc = desc;
  1297. ep->ep.maxpacket = max;
  1298. ep->dma_start = 0;
  1299. if (nt) {
  1300. /*
  1301. * Maximum possible size of all payloads in one microframe
  1302. * regardless of direction when using high-bandwidth mode.
  1303. */
  1304. ep->bytes_per_buffer = (nt + 1) * max;
  1305. } else if (ep->is_in) {
  1306. /*
  1307. * The biggest multiple of maximum packet size that fits into
  1308. * the buffer. The hardware will split it up into many packets in
  1309. * the IN direction.
  1310. */
  1311. ep->bytes_per_buffer = (buffer_size / max) * max;
  1312. } else {
  1313. /*
  1314. * Only single packets will be placed in the buffers in the OUT
  1315. * direction.
  1316. */
  1317. ep->bytes_per_buffer = max;
  1318. }
	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}

/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Freeing a request that is still on a queue leads to a memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req = NULL, *iter;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}

/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);
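
	/* Sum the byte counts of the hardware buffers that currently hold data */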
	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);
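
	/* Setting GR_EPCTRL_CB drops any data held in the endpoint buffers */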
	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static const struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
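
	/* The low bits of the status register hold the current frame number */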
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;
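
	/* Request remote wakeup signalling by setting the RW control bit */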
	spin_lock(&dev->lock);
	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);
	spin_unlock(&dev->lock);

	return 0;
}

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
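
	/* GR_CONTROL_EP controls the pullup that signals connection to the host */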
	spin_lock(&dev->lock);
	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);
	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);
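
	/*
	 * ep0 gets a preallocated request and a DMA-capable bounce buffer for
	 * control transfers; all other endpoints are handed to the gadget
	 * core via the gadget ep_list.
	 */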
	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		if (!_req)
			return -ENOMEM;

		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!buf) {
			gr_free_request(&ep->ep, _req);
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete callback set when used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}

	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}

/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
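
	/*
	 * Per-endpoint buffer sizes can be configured via the "epobufsizes"
	 * and "epibufsizes" device tree properties; 1024 bytes is the default
	 * when an entry is absent.
	 */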
	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}

static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}

static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);
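
	/* Free the preallocated ep0 requests and all per-endpoint tail buffers */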
	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}

static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}

static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0)
		return -ENODEV;

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0)
			return -ENODEV;
	} else {
		dev->irqi = 0;
	}

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
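	/* The NEPI/NEPO status fields hold the endpoint counts minus one */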
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	/* Register the gadget; a gadget driver may bind from this point on */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	spin_lock(&dev->lock);

	retval = gr_udc_init(dev);
	if (retval) {
		spin_unlock(&dev->lock);
		goto out;
	}

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	spin_unlock(&dev->lock);

	gr_dfs_create(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	if (retval)
		gr_remove(pdev);

	return retval;
}

static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");