aspeed_udc.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (c) 2021 Aspeed Technology Inc.
  4. */
  5. #include <linux/clk.h>
  6. #include <linux/delay.h>
  7. #include <linux/dma-mapping.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/of.h>
  12. #include <linux/platform_device.h>
  13. #include <linux/prefetch.h>
  14. #include <linux/usb/ch9.h>
  15. #include <linux/usb/gadget.h>
  16. #include <linux/slab.h>
  17. #define AST_UDC_NUM_ENDPOINTS (1 + 4)
  18. #define AST_UDC_EP0_MAX_PACKET 64 /* EP0's max packet size */
  19. #define AST_UDC_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */
  20. #define AST_UDC_DESCS_COUNT 256 /* Use 256 stages descriptor mode (32/256) */
  21. #define AST_UDC_DESC_MODE 1 /* Single/Multiple Stage(s) Descriptor Mode */
  22. #define AST_UDC_EP_DMA_SIZE (AST_UDC_EPn_MAX_PACKET + 8 * AST_UDC_DESCS_COUNT)
  23. /*****************************
  24. * *
  25. * UDC register definitions *
  26. * *
  27. *****************************/
  28. #define AST_UDC_FUNC_CTRL 0x00 /* Root Function Control & Status Register */
  29. #define AST_UDC_CONFIG 0x04 /* Root Configuration Setting Register */
  30. #define AST_UDC_IER 0x08 /* Interrupt Control Register */
  31. #define AST_UDC_ISR 0x0C /* Interrupt Status Register */
  32. #define AST_UDC_EP_ACK_IER 0x10 /* Programmable ep Pool ACK Interrupt Enable Reg */
  33. #define AST_UDC_EP_NAK_IER 0x14 /* Programmable ep Pool NAK Interrupt Enable Reg */
  34. #define AST_UDC_EP_ACK_ISR 0x18 /* Programmable ep Pool ACK Interrupt Status Reg */
  35. #define AST_UDC_EP_NAK_ISR 0x1C /* Programmable ep Pool NAK Interrupt Status Reg */
  36. #define AST_UDC_DEV_RESET 0x20 /* Device Controller Soft Reset Enable Register */
  37. #define AST_UDC_STS 0x24 /* USB Status Register */
  38. #define AST_VHUB_EP_DATA 0x28 /* Programmable ep Pool Data Toggle Value Set */
  39. #define AST_VHUB_ISO_TX_FAIL 0x2C /* Isochronous Transaction Fail Accumulator */
  40. #define AST_UDC_EP0_CTRL 0x30 /* Endpoint 0 Control/Status Register */
  41. #define AST_UDC_EP0_DATA_BUFF 0x34 /* Base Address of ep0 IN/OUT Data Buffer Reg */
  42. #define AST_UDC_SETUP0 0x80 /* Root Device Setup Data Buffer0 */
  43. #define AST_UDC_SETUP1 0x84 /* Root Device Setup Data Buffer1 */
  44. /* Main control reg */
  45. #define USB_PHY_CLK_EN BIT(31)
  46. #define USB_FIFO_DYN_PWRD_EN BIT(19)
  47. #define USB_EP_LONG_DESC BIT(18)
  48. #define USB_BIST_TEST_PASS BIT(13)
  49. #define USB_BIST_TURN_ON BIT(12)
  50. #define USB_PHY_RESET_DIS BIT(11)
  51. #define USB_TEST_MODE(x) ((x) << 8)
  52. #define USB_FORCE_TIMER_HS BIT(7)
  53. #define USB_FORCE_HS BIT(6)
  54. #define USB_REMOTE_WAKEUP_12MS BIT(5)
  55. #define USB_REMOTE_WAKEUP_EN BIT(4)
  56. #define USB_AUTO_REMOTE_WAKEUP_EN BIT(3)
  57. #define USB_STOP_CLK_IN_SUPEND BIT(2)
  58. #define USB_UPSTREAM_FS BIT(1)
  59. #define USB_UPSTREAM_EN BIT(0)
  60. /* Main config reg */
  61. #define UDC_CFG_SET_ADDR(x) ((x) & 0x3f)
  62. #define UDC_CFG_ADDR_MASK (0x3f)
  63. /* Interrupt ctrl & status reg */
  64. #define UDC_IRQ_EP_POOL_NAK BIT(17)
  65. #define UDC_IRQ_EP_POOL_ACK_STALL BIT(16)
  66. #define UDC_IRQ_BUS_RESUME BIT(8)
  67. #define UDC_IRQ_BUS_SUSPEND BIT(7)
  68. #define UDC_IRQ_BUS_RESET BIT(6)
  69. #define UDC_IRQ_EP0_IN_DATA_NAK BIT(4)
  70. #define UDC_IRQ_EP0_IN_ACK_STALL BIT(3)
  71. #define UDC_IRQ_EP0_OUT_NAK BIT(2)
  72. #define UDC_IRQ_EP0_OUT_ACK_STALL BIT(1)
  73. #define UDC_IRQ_EP0_SETUP BIT(0)
  74. #define UDC_IRQ_ACK_ALL (0x1ff)
  75. /* EP isr reg */
  76. #define USB_EP3_ISR BIT(3)
  77. #define USB_EP2_ISR BIT(2)
  78. #define USB_EP1_ISR BIT(1)
  79. #define USB_EP0_ISR BIT(0)
  80. #define UDC_IRQ_EP_ACK_ALL (0xf)
  81. /*Soft reset reg */
  82. #define ROOT_UDC_SOFT_RESET BIT(0)
  83. /* USB status reg */
  84. #define UDC_STS_HIGHSPEED BIT(27)
  85. /* Programmable EP data toggle */
  86. #define EP_TOGGLE_SET_EPNUM(x) ((x) & 0x3)
  87. /* EP0 ctrl reg */
  88. #define EP0_GET_RX_LEN(x) ((x >> 16) & 0x7f)
  89. #define EP0_TX_LEN(x) ((x & 0x7f) << 8)
  90. #define EP0_RX_BUFF_RDY BIT(2)
  91. #define EP0_TX_BUFF_RDY BIT(1)
  92. #define EP0_STALL BIT(0)
  93. /*************************************
  94. * *
  95. * per-endpoint register definitions *
  96. * *
  97. *************************************/
  98. #define AST_UDC_EP_CONFIG 0x00 /* Endpoint Configuration Register */
  99. #define AST_UDC_EP_DMA_CTRL 0x04 /* DMA Descriptor List Control/Status Register */
  100. #define AST_UDC_EP_DMA_BUFF 0x08 /* DMA Descriptor/Buffer Base Address */
  101. #define AST_UDC_EP_DMA_STS 0x0C /* DMA Descriptor List R/W Pointer and Status */
  102. #define AST_UDC_EP_BASE 0x200
  103. #define AST_UDC_EP_OFFSET 0x10
  104. /* EP config reg */
  105. #define EP_SET_MAX_PKT(x) ((x & 0x3ff) << 16)
  106. #define EP_DATA_FETCH_CTRL(x) ((x & 0x3) << 14)
  107. #define EP_AUTO_DATA_DISABLE (0x1 << 13)
  108. #define EP_SET_EP_STALL (0x1 << 12)
  109. #define EP_SET_EP_NUM(x) ((x & 0xf) << 8)
  110. #define EP_SET_TYPE_MASK(x) ((x) << 5)
  111. #define EP_TYPE_BULK (0x1)
  112. #define EP_TYPE_INT (0x2)
  113. #define EP_TYPE_ISO (0x3)
  114. #define EP_DIR_OUT (0x1 << 4)
  115. #define EP_ALLOCATED_MASK (0x7 << 1)
  116. #define EP_ENABLE BIT(0)
  117. /* EP DMA ctrl reg */
  118. #define EP_DMA_CTRL_GET_PROC_STS(x) ((x >> 4) & 0xf)
  119. #define EP_DMA_CTRL_STS_RX_IDLE 0x0
  120. #define EP_DMA_CTRL_STS_TX_IDLE 0x8
  121. #define EP_DMA_CTRL_IN_LONG_MODE (0x1 << 3)
  122. #define EP_DMA_CTRL_RESET (0x1 << 2)
  123. #define EP_DMA_SINGLE_STAGE (0x1 << 1)
  124. #define EP_DMA_DESC_MODE (0x1 << 0)
  125. /* EP DMA status reg */
  126. #define EP_DMA_SET_TX_SIZE(x) ((x & 0x7ff) << 16)
  127. #define EP_DMA_GET_TX_SIZE(x) (((x) >> 16) & 0x7ff)
  128. #define EP_DMA_GET_RPTR(x) (((x) >> 8) & 0xff)
  129. #define EP_DMA_GET_WPTR(x) ((x) & 0xff)
  130. #define EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */
  131. /* EP desc reg */
  132. #define AST_EP_DMA_DESC_INTR_ENABLE BIT(31)
  133. #define AST_EP_DMA_DESC_PID_DATA0 (0 << 14)
  134. #define AST_EP_DMA_DESC_PID_DATA2 BIT(14)
  135. #define AST_EP_DMA_DESC_PID_DATA1 (2 << 14)
  136. #define AST_EP_DMA_DESC_PID_MDATA (3 << 14)
  137. #define EP_DESC1_IN_LEN(x) ((x) & 0x1fff)
  138. #define AST_EP_DMA_DESC_MAX_LEN (7680) /* Max packet length for trasmit in 1 desc */
/* Per-request state layered on top of the generic usb_request */
struct ast_udc_request {
	struct usb_request req;
	/* Link in the owning endpoint's request queue */
	struct list_head queue;
	unsigned mapped:1;
	/* Bytes already handed to the DMA descriptor engine for this req */
	unsigned int actual_dma_length;
	/* Descriptor ring write pointer at the time this request was kicked */
	u32 saved_dma_wptr;
};

#define to_ast_req(__req) container_of(__req, struct ast_udc_request, req)

/* One hardware DMA descriptor: buffer address word + control/length word */
struct ast_dma_desc {
	u32 des_0;
	u32 des_1;
};

/* Per-endpoint state */
struct ast_udc_ep {
	struct usb_ep ep;

	/* Request queue */
	struct list_head queue;

	struct ast_udc_dev *udc;
	/* Base of this endpoint's register window */
	void __iomem *ep_reg;
	void *epn_buf;
	dma_addr_t epn_buf_dma;
	const struct usb_endpoint_descriptor *desc;

	/* DMA Descriptors */
	struct ast_dma_desc *descs;
	dma_addr_t descs_dma;
	/* Software write pointer into the descriptor ring */
	u32 descs_wptr;
	/* Maximum payload carried by a single descriptor */
	u32 chunk_max;

	bool dir_in:1;
	unsigned stopped:1;
	/* True when descriptor-list DMA (vs single-stage) is in use */
	bool desc_mode:1;
};

#define to_ast_ep(__ep) container_of(__ep, struct ast_udc_ep, ep)

/* Per-controller state */
struct ast_udc_dev {
	struct platform_device *pdev;
	void __iomem *reg;
	int irq;
	/* Protects endpoint queues and hardware register access */
	spinlock_t lock;
	struct clk *clk;
	struct work_struct wake_work;

	/* EP0 DMA buffers allocated in one chunk */
	void *ep0_buf;
	dma_addr_t ep0_buf_dma;
	struct ast_udc_ep ep[AST_UDC_NUM_ENDPOINTS];

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	void __iomem *creq;
	enum usb_device_state suspended_from;
	int desc_mode;

	/* Force full speed only */
	bool force_usb1:1;
	/* An EP0 zero-length TX (status/completion) is outstanding */
	unsigned is_control_tx:1;
	bool wakeup_en:1;
};

#define to_ast_dev(__g) container_of(__g, struct ast_udc_dev, gadget)

static const char * const ast_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4"
};
  195. #ifdef AST_UDC_DEBUG_ALL
  196. #define AST_UDC_DEBUG
  197. #define AST_SETUP_DEBUG
  198. #define AST_EP_DEBUG
  199. #define AST_ISR_DEBUG
  200. #endif
  201. #ifdef AST_SETUP_DEBUG
  202. #define SETUP_DBG(u, fmt, ...) \
  203. dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
  204. #else
  205. #define SETUP_DBG(u, fmt, ...)
  206. #endif
  207. #ifdef AST_EP_DEBUG
  208. #define EP_DBG(e, fmt, ...) \
  209. dev_dbg(&(e)->udc->pdev->dev, "%s():%s " fmt, __func__, \
  210. (e)->ep.name, ##__VA_ARGS__)
  211. #else
  212. #define EP_DBG(ep, fmt, ...) ((void)(ep))
  213. #endif
  214. #ifdef AST_UDC_DEBUG
  215. #define UDC_DBG(u, fmt, ...) \
  216. dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
  217. #else
  218. #define UDC_DBG(u, fmt, ...)
  219. #endif
  220. #ifdef AST_ISR_DEBUG
  221. #define ISR_DBG(u, fmt, ...) \
  222. dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
  223. #else
  224. #define ISR_DBG(u, fmt, ...)
  225. #endif
  226. /*-------------------------------------------------------------------------*/
  227. #define ast_udc_read(udc, offset) \
  228. readl((udc)->reg + (offset))
  229. #define ast_udc_write(udc, val, offset) \
  230. writel((val), (udc)->reg + (offset))
  231. #define ast_ep_read(ep, reg) \
  232. readl((ep)->ep_reg + (reg))
  233. #define ast_ep_write(ep, val, reg) \
  234. writel((val), (ep)->ep_reg + (reg))
  235. /*-------------------------------------------------------------------------*/
/*
 * Complete @req on @ep with @status: unlink it from the endpoint queue
 * and hand it back to the gadget driver.
 *
 * Called with udc->lock held.  The lock is dropped around the gadget's
 * completion callback (which may re-queue requests) and re-acquired
 * before returning.
 */
static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req,
			 int status)
{
	struct ast_udc_dev *udc = ep->udc;

	EP_DBG(ep, "req @%p, len (%d/%d), buf:0x%x, dir:0x%x\n",
	       req, req->req.actual, req->req.length,
	       (u32)req->req.buf, ep->dir_in);

	list_del(&req->queue);

	/* A request that already failed/was cancelled keeps its status */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	if (status && status != -ESHUTDOWN)
		EP_DBG(ep, "done req:%p, status:%d\n", req, status);

	spin_unlock(&udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}
  254. static void ast_udc_nuke(struct ast_udc_ep *ep, int status)
  255. {
  256. int count = 0;
  257. while (!list_empty(&ep->queue)) {
  258. struct ast_udc_request *req;
  259. req = list_entry(ep->queue.next, struct ast_udc_request,
  260. queue);
  261. ast_udc_done(ep, req, status);
  262. count++;
  263. }
  264. if (count)
  265. EP_DBG(ep, "Nuked %d request(s)\n", count);
  266. }
  267. /*
  268. * Stop activity on all endpoints.
  269. * Device controller for which EP activity is to be stopped.
  270. *
  271. * All the endpoints are stopped and any pending transfer requests if any on
  272. * the endpoint are terminated.
  273. */
  274. static void ast_udc_stop_activity(struct ast_udc_dev *udc)
  275. {
  276. struct ast_udc_ep *ep;
  277. int i;
  278. for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
  279. ep = &udc->ep[i];
  280. ep->stopped = 1;
  281. ast_udc_nuke(ep, -ESHUTDOWN);
  282. }
  283. }
/*
 * Gadget ep_ops.enable: program the endpoint config register from the
 * descriptor (max packet, EP number, direction, transfer type), reset
 * the endpoint's DMA engine, and select descriptor-list or single-stage
 * DMA mode.
 *
 * Returns 0 on success, -EINVAL on a bad descriptor, -ESHUTDOWN when no
 * gadget driver is bound.
 */
static int ast_udc_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	u16 maxpacket = usb_endpoint_maxp(desc);
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	u8 epnum = usb_endpoint_num(desc);
	unsigned long flags;
	u32 ep_conf = 0;
	u8 dir_in;
	u8 type;

	if (!_ep || !ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EP_DBG(ep, "Failed, invalid EP enable param\n");
		return -EINVAL;
	}

	if (!udc->driver) {
		EP_DBG(ep, "bogus device state\n");
		return -ESHUTDOWN;
	}

	EP_DBG(ep, "maxpacket:0x%x\n", maxpacket);

	spin_lock_irqsave(&udc->lock, flags);

	ep->desc = desc;
	ep->stopped = 0;
	ep->ep.maxpacket = maxpacket;
	ep->chunk_max = AST_EP_DMA_DESC_MAX_LEN;

	/* Full-size EPs leave the max-packet field at its hardware default */
	if (maxpacket < AST_UDC_EPn_MAX_PACKET)
		ep_conf = EP_SET_MAX_PKT(maxpacket);

	ep_conf |= EP_SET_EP_NUM(epnum);

	type = usb_endpoint_type(desc);
	dir_in = usb_endpoint_dir_in(desc);
	ep->dir_in = dir_in;
	if (!ep->dir_in)
		ep_conf |= EP_DIR_OUT;

	EP_DBG(ep, "type %d, dir_in %d\n", type, dir_in);
	switch (type) {
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_ISO);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_INT:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_INT);
		break;
	}

	/* Descriptor-list DMA is only used for IN endpoints with a ring */
	ep->desc_mode = udc->desc_mode && ep->descs_dma && ep->dir_in;

	if (ep->desc_mode) {
		/* Reset the engine, clear pointers, install the ring base */
		ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
		ast_ep_write(ep, ep->descs_dma, AST_UDC_EP_DMA_BUFF);

		/* Enable Long Descriptor Mode */
		ast_ep_write(ep, EP_DMA_CTRL_IN_LONG_MODE | EP_DMA_DESC_MODE,
			     AST_UDC_EP_DMA_CTRL);

		ep->descs_wptr = 0;

	} else {
		/* Single-stage mode: one buffer programmed per kick */
		ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, EP_DMA_SINGLE_STAGE, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
	}

	/* Cleanup data toggle just in case */
	ast_udc_write(udc, EP_TOGGLE_SET_EPNUM(epnum), AST_VHUB_EP_DATA);

	/* Enable EP */
	ast_ep_write(ep, ep_conf | EP_ENABLE, AST_UDC_EP_CONFIG);

	EP_DBG(ep, "ep_config: 0x%x\n", ast_ep_read(ep, AST_UDC_EP_CONFIG));

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
  352. static int ast_udc_ep_disable(struct usb_ep *_ep)
  353. {
  354. struct ast_udc_ep *ep = to_ast_ep(_ep);
  355. struct ast_udc_dev *udc = ep->udc;
  356. unsigned long flags;
  357. spin_lock_irqsave(&udc->lock, flags);
  358. ep->ep.desc = NULL;
  359. ep->stopped = 1;
  360. ast_udc_nuke(ep, -ESHUTDOWN);
  361. ast_ep_write(ep, 0, AST_UDC_EP_CONFIG);
  362. spin_unlock_irqrestore(&udc->lock, flags);
  363. return 0;
  364. }
  365. static struct usb_request *ast_udc_ep_alloc_request(struct usb_ep *_ep,
  366. gfp_t gfp_flags)
  367. {
  368. struct ast_udc_ep *ep = to_ast_ep(_ep);
  369. struct ast_udc_request *req;
  370. req = kzalloc(sizeof(struct ast_udc_request), gfp_flags);
  371. if (!req) {
  372. EP_DBG(ep, "request allocation failed\n");
  373. return NULL;
  374. }
  375. INIT_LIST_HEAD(&req->queue);
  376. return &req->req;
  377. }
  378. static void ast_udc_ep_free_request(struct usb_ep *_ep,
  379. struct usb_request *_req)
  380. {
  381. struct ast_udc_request *req = to_ast_req(_req);
  382. kfree(req);
  383. }
/*
 * Fill the endpoint's descriptor ring for a transfer of @tx_len bytes
 * starting at DMA address @dma_buf.  The transfer is split into chunks
 * of at most ep->chunk_max bytes, one descriptor per chunk.  The ring
 * slot where this request begins is saved in @req so the completion
 * handler can walk exactly the descriptors it consumed.
 *
 * Returns 0 on success, -EINVAL if the endpoint has no descriptor ring.
 */
static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
				    u16 tx_len, struct ast_udc_request *req)
{
	struct ast_udc_dev *udc = ep->udc;
	struct device *dev = &udc->pdev->dev;
	bool last = false;
	int chunk, count;
	u32 offset;

	if (!ep->descs) {
		dev_warn(dev, "%s: Empty DMA descs list failure\n",
			 ep->ep.name);
		return -EINVAL;
	}

	chunk = tx_len;
	offset = count = 0;

	EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req,
	       "wptr", ep->descs_wptr, "dma_buf", dma_buf,
	       "tx_len", tx_len);

	/* Create Descriptor Lists */
	while (chunk >= 0 && !last && count < AST_UDC_DESCS_COUNT) {

		ep->descs[ep->descs_wptr].des_0 = dma_buf + offset;

		/* Either a full-size chunk or the final (possibly 0-len) one */
		if (chunk > ep->chunk_max) {
			ep->descs[ep->descs_wptr].des_1 = ep->chunk_max;
		} else {
			ep->descs[ep->descs_wptr].des_1 = chunk;
			last = true;
		}

		chunk -= ep->chunk_max;

		EP_DBG(ep, "descs[%d]: 0x%x 0x%x\n",
		       ep->descs_wptr,
		       ep->descs[ep->descs_wptr].des_0,
		       ep->descs[ep->descs_wptr].des_1);

		/* Remember where this request starts in the ring */
		if (count == 0)
			req->saved_dma_wptr = ep->descs_wptr;

		ep->descs_wptr++;
		count++;

		/* Ring wrap-around */
		if (ep->descs_wptr >= AST_UDC_DESCS_COUNT)
			ep->descs_wptr = 0;

		offset = ep->chunk_max * count;
	}

	return 0;
}
  426. static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req)
  427. {
  428. u32 tx_len;
  429. u32 last;
  430. last = req->req.length - req->req.actual;
  431. tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;
  432. EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n",
  433. req, tx_len, ep->dir_in);
  434. ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF);
  435. /* Start DMA */
  436. ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len), AST_UDC_EP_DMA_STS);
  437. ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len) | EP_DMA_SINGLE_KICK,
  438. AST_UDC_EP_DMA_STS);
  439. }
/*
 * Queue up to a ring-full of descriptors for @req, then kick the EP DMA
 * engine by publishing the new write pointer.  Called with udc->lock
 * held.
 */
static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep,
				  struct ast_udc_request *req)
{
	u32 descs_max_size;
	u32 tx_len;
	u32 last;

	/* Upper bound for one kick: every descriptor at its maximum length */
	descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT;

	last = req->req.length - req->req.actual;
	tx_len = last > descs_max_size ? descs_max_size : last;

	EP_DBG(ep, "kick req @%p, %s:%d, %s:0x%x, %s:0x%x (%d/%d), %s:0x%x\n",
	       req, "tx_len", tx_len, "dir_in", ep->dir_in,
	       "dma", req->req.dma + req->req.actual,
	       req->req.actual, req->req.length,
	       "descs_max_size", descs_max_size);

	/* Only credit the bytes if the descriptors were actually built */
	if (!ast_dma_descriptor_setup(ep, req->req.dma + req->req.actual,
				      tx_len, req))
		req->actual_dma_length += tx_len;

	/* make sure CPU done everything before triggering DMA */
	mb();

	/* Writing the new write pointer starts the transfer */
	ast_ep_write(ep, ep->descs_wptr, AST_UDC_EP_DMA_STS);

	EP_DBG(ep, "descs_wptr:%d, dstat:0x%x, dctrl:0x%x\n",
	       ep->descs_wptr,
	       ast_ep_read(ep, AST_UDC_EP_DMA_STS),
	       ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));
}
/*
 * Start (or continue) an EP0 data stage for @req: program the data
 * buffer address, then arm TX or RX according to the direction.
 * Called with udc->lock held.
 */
static void ast_udc_ep0_queue(struct ast_udc_ep *ep,
			      struct ast_udc_request *req)
{
	struct ast_udc_dev *udc = ep->udc;
	u32 tx_len;
	u32 last;

	/* At most one max-packet of the remaining data per arm */
	last = req->req.length - req->req.actual;
	tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;

	ast_udc_write(udc, req->req.dma + req->req.actual,
		      AST_UDC_EP0_DATA_BUFF);

	if (ep->dir_in) {
		/* IN requests, send data */
		SETUP_DBG(udc, "IN: %s:0x%x, %s:0x%x, %s:%d (%d/%d), %s:%d\n",
			  "buf", (u32)req->req.buf,
			  "dma", req->req.dma + req->req.actual,
			  "tx_len", tx_len,
			  req->req.actual, req->req.length,
			  "dir_in", ep->dir_in);

		req->req.actual += tx_len;
		ast_udc_write(udc, EP0_TX_LEN(tx_len), AST_UDC_EP0_CTRL);
		ast_udc_write(udc, EP0_TX_LEN(tx_len) | EP0_TX_BUFF_RDY,
			      AST_UDC_EP0_CTRL);

	} else {
		/* OUT requests, receive data */
		SETUP_DBG(udc, "OUT: %s:%x, %s:%x, %s:(%d/%d), %s:%d\n",
			  "buf", (u32)req->req.buf,
			  "dma", req->req.dma + req->req.actual,
			  "len", req->req.actual, req->req.length,
			  "dir_in", ep->dir_in);

		if (!req->req.length) {
			/* 0 len request, send tx as completion */
			ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
			/* Flip direction so the IN-ACK path completes it */
			ep->dir_in = 0x1;
		} else
			ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
	}
}
  502. static int ast_udc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
  503. gfp_t gfp_flags)
  504. {
  505. struct ast_udc_request *req = to_ast_req(_req);
  506. struct ast_udc_ep *ep = to_ast_ep(_ep);
  507. struct ast_udc_dev *udc = ep->udc;
  508. struct device *dev = &udc->pdev->dev;
  509. unsigned long flags;
  510. int rc;
  511. if (unlikely(!_req || !_req->complete || !_req->buf || !_ep)) {
  512. dev_warn(dev, "Invalid EP request !\n");
  513. return -EINVAL;
  514. }
  515. if (ep->stopped) {
  516. dev_warn(dev, "%s is already stopped !\n", _ep->name);
  517. return -ESHUTDOWN;
  518. }
  519. spin_lock_irqsave(&udc->lock, flags);
  520. list_add_tail(&req->queue, &ep->queue);
  521. req->req.actual = 0;
  522. req->req.status = -EINPROGRESS;
  523. req->actual_dma_length = 0;
  524. rc = usb_gadget_map_request(&udc->gadget, &req->req, ep->dir_in);
  525. if (rc) {
  526. EP_DBG(ep, "Request mapping failure %d\n", rc);
  527. dev_warn(dev, "Request mapping failure %d\n", rc);
  528. goto end;
  529. }
  530. EP_DBG(ep, "enqueue req @%p\n", req);
  531. EP_DBG(ep, "l=%d, dma:0x%x, zero:%d, is_in:%d\n",
  532. _req->length, _req->dma, _req->zero, ep->dir_in);
  533. /* EP0 request enqueue */
  534. if (ep->ep.desc == NULL) {
  535. if ((req->req.dma % 4) != 0) {
  536. dev_warn(dev, "EP0 req dma alignment error\n");
  537. rc = -ESHUTDOWN;
  538. goto end;
  539. }
  540. ast_udc_ep0_queue(ep, req);
  541. goto end;
  542. }
  543. /* EPn request enqueue */
  544. if (list_is_singular(&ep->queue)) {
  545. if (ep->desc_mode)
  546. ast_udc_epn_kick_desc(ep, req);
  547. else
  548. ast_udc_epn_kick(ep, req);
  549. }
  550. end:
  551. spin_unlock_irqrestore(&udc->lock, flags);
  552. return rc;
  553. }
  554. static int ast_udc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
  555. {
  556. struct ast_udc_ep *ep = to_ast_ep(_ep);
  557. struct ast_udc_dev *udc = ep->udc;
  558. struct ast_udc_request *req;
  559. unsigned long flags;
  560. int rc = 0;
  561. spin_lock_irqsave(&udc->lock, flags);
  562. /* make sure it's actually queued on this endpoint */
  563. list_for_each_entry(req, &ep->queue, queue) {
  564. if (&req->req == _req) {
  565. list_del_init(&req->queue);
  566. ast_udc_done(ep, req, -ESHUTDOWN);
  567. _req->status = -ECONNRESET;
  568. break;
  569. }
  570. }
  571. /* dequeue request not found */
  572. if (&req->req != _req)
  573. rc = -EINVAL;
  574. spin_unlock_irqrestore(&udc->lock, flags);
  575. return rc;
  576. }
  577. static int ast_udc_ep_set_halt(struct usb_ep *_ep, int value)
  578. {
  579. struct ast_udc_ep *ep = to_ast_ep(_ep);
  580. struct ast_udc_dev *udc = ep->udc;
  581. unsigned long flags;
  582. int epnum;
  583. u32 ctrl;
  584. EP_DBG(ep, "val:%d\n", value);
  585. spin_lock_irqsave(&udc->lock, flags);
  586. epnum = usb_endpoint_num(ep->desc);
  587. /* EP0 */
  588. if (epnum == 0) {
  589. ctrl = ast_udc_read(udc, AST_UDC_EP0_CTRL);
  590. if (value)
  591. ctrl |= EP0_STALL;
  592. else
  593. ctrl &= ~EP0_STALL;
  594. ast_udc_write(udc, ctrl, AST_UDC_EP0_CTRL);
  595. } else {
  596. /* EPn */
  597. ctrl = ast_udc_read(udc, AST_UDC_EP_CONFIG);
  598. if (value)
  599. ctrl |= EP_SET_EP_STALL;
  600. else
  601. ctrl &= ~EP_SET_EP_STALL;
  602. ast_ep_write(ep, ctrl, AST_UDC_EP_CONFIG);
  603. /* only epn is stopped and waits for clear */
  604. ep->stopped = value ? 1 : 0;
  605. }
  606. spin_unlock_irqrestore(&udc->lock, flags);
  607. return 0;
  608. }
/* Endpoint operations exposed to the gadget core */
static const struct usb_ep_ops ast_udc_ep_ops = {
	.enable = ast_udc_ep_enable,
	.disable = ast_udc_ep_disable,
	.alloc_request = ast_udc_ep_alloc_request,
	.free_request = ast_udc_ep_free_request,
	.queue = ast_udc_ep_queue,
	.dequeue = ast_udc_ep_dequeue,
	.set_halt = ast_udc_ep_set_halt,
	/* there's only imprecise fifo status reporting */
};
/* Arm EP0 to receive the next OUT packet into the shared EP0 buffer */
static void ast_udc_ep0_rx(struct ast_udc_dev *udc)
{
	ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
	ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
}
/* Trigger an IN transfer (e.g. status stage) from the shared EP0 buffer */
static void ast_udc_ep0_tx(struct ast_udc_dev *udc)
{
	ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
	ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
}
/*
 * EP0 OUT-ACK handler: account the bytes just received and either
 * finish the request (arming the IN status stage) or re-arm EP0 for
 * more OUT data.
 */
static void ast_udc_ep0_out(struct ast_udc_dev *udc)
{
	struct device *dev = &udc->pdev->dev;
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;
	u16 rx_len;

	if (list_empty(&ep->queue))
		return;

	req = list_entry(ep->queue.next, struct ast_udc_request, queue);

	rx_len = EP0_GET_RX_LEN(ast_udc_read(udc, AST_UDC_EP0_CTRL));
	req->req.actual += rx_len;

	SETUP_DBG(udc, "req %p (%d/%d)\n", req,
		  req->req.actual, req->req.length);

	/* Short packet or all expected data received: data stage done */
	if ((rx_len < ep->ep.maxpacket) ||
	    (req->req.actual == req->req.length)) {
		ast_udc_ep0_tx(udc);
		if (!ep->dir_in)
			ast_udc_done(ep, req, 0);

	} else {
		/* More data than the request asked for: complete defensively */
		if (rx_len > req->req.length) {
			// Issue Fix
			dev_warn(dev, "Something wrong (%d/%d)\n",
				 req->req.actual, req->req.length);
			ast_udc_ep0_tx(udc);
			ast_udc_done(ep, req, 0);
			return;
		}

		ep->dir_in = 0;

		/* More works */
		ast_udc_ep0_queue(ep, req);
	}
}
/*
 * EP0 IN-ACK handler: continue a multi-packet IN transfer, or complete
 * the request and arm the OUT status stage.
 */
static void ast_udc_ep0_in(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;

	if (list_empty(&ep->queue)) {
		/* A queued control TX completion only needs the RX re-arm */
		if (udc->is_control_tx) {
			ast_udc_ep0_rx(udc);
			udc->is_control_tx = 0;
		}

		return;
	}

	req = list_entry(ep->queue.next, struct ast_udc_request, queue);

	SETUP_DBG(udc, "req %p (%d/%d)\n", req,
		  req->req.actual, req->req.length);

	if (req->req.length == req->req.actual) {
		/* Non-zero-length transfers get an OUT status stage */
		if (req->req.length)
			ast_udc_ep0_rx(udc);

		if (ep->dir_in)
			ast_udc_done(ep, req, 0);

	} else {
		/* More works */
		ast_udc_ep0_queue(ep, req);
	}
}
  685. static void ast_udc_epn_handle(struct ast_udc_dev *udc, u16 ep_num)
  686. {
  687. struct ast_udc_ep *ep = &udc->ep[ep_num];
  688. struct ast_udc_request *req;
  689. u16 len = 0;
  690. if (list_empty(&ep->queue))
  691. return;
  692. req = list_first_entry(&ep->queue, struct ast_udc_request, queue);
  693. len = EP_DMA_GET_TX_SIZE(ast_ep_read(ep, AST_UDC_EP_DMA_STS));
  694. req->req.actual += len;
  695. EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
  696. req->req.actual, req->req.length, "len", len);
  697. /* Done this request */
  698. if (req->req.length == req->req.actual) {
  699. ast_udc_done(ep, req, 0);
  700. req = list_first_entry_or_null(&ep->queue,
  701. struct ast_udc_request,
  702. queue);
  703. } else {
  704. /* Check for short packet */
  705. if (len < ep->ep.maxpacket) {
  706. ast_udc_done(ep, req, 0);
  707. req = list_first_entry_or_null(&ep->queue,
  708. struct ast_udc_request,
  709. queue);
  710. }
  711. }
  712. /* More requests */
  713. if (req)
  714. ast_udc_epn_kick(ep, req);
  715. }
/*
 * Descriptor-mode DMA completion for a generic endpoint: sum the byte counts
 * of the descriptors consumed since the last kick, retire the head request if
 * complete (or short packet), and kick the next request if its descriptors
 * have not been set up yet.
 */
static void ast_udc_epn_handle_desc(struct ast_udc_dev *udc, u16 ep_num)
{
	struct ast_udc_ep *ep = &udc->ep[ep_num];
	struct device *dev = &udc->pdev->dev;
	struct ast_udc_request *req;
	u32 proc_sts, wr_ptr, rd_ptr;
	u32 len_in_desc, ctrl;
	u16 total_len = 0;
	int i;

	if (list_empty(&ep->queue)) {
		dev_warn(dev, "%s request queue empty!\n", ep->ep.name);
		return;
	}

	req = list_first_entry(&ep->queue, struct ast_udc_request, queue);

	ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_CTRL);
	proc_sts = EP_DMA_CTRL_GET_PROC_STS(ctrl);

	/* Check processing status is idle */
	if (proc_sts != EP_DMA_CTRL_STS_RX_IDLE &&
	    proc_sts != EP_DMA_CTRL_STS_TX_IDLE) {
		dev_warn(dev, "EP DMA CTRL: 0x%x, PS:0x%x\n",
			 ast_ep_read(ep, AST_UDC_EP_DMA_CTRL),
			 proc_sts);
		return;
	}

	/* Hardware read/write pointers into the descriptor ring */
	ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_STS);
	rd_ptr = EP_DMA_GET_RPTR(ctrl);
	wr_ptr = EP_DMA_GET_WPTR(ctrl);

	/* All armed descriptors should have been consumed at this point */
	if (rd_ptr != wr_ptr) {
		dev_warn(dev, "desc list is not empty ! %s:%d, %s:%d\n",
			 "rptr", rd_ptr, "wptr", wr_ptr);
		return;
	}

	EP_DBG(ep, "rd_ptr:%d, wr_ptr:%d\n", rd_ptr, wr_ptr);
	i = req->saved_dma_wptr;

	/* Walk the ring from where this request's descriptors started,
	 * wrapping at AST_UDC_DESCS_COUNT, and accumulate the per-descriptor
	 * transfer lengths reported by hardware.
	 */
	do {
		len_in_desc = EP_DESC1_IN_LEN(ep->descs[i].des_1);
		EP_DBG(ep, "desc[%d] len: %d\n", i, len_in_desc);
		total_len += len_in_desc;
		i++;
		if (i >= AST_UDC_DESCS_COUNT)
			i = 0;
	} while (i != wr_ptr);

	req->req.actual += total_len;

	EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
	       req->req.actual, req->req.length, "len", total_len);

	/* Done this request */
	if (req->req.length == req->req.actual) {
		ast_udc_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue,
					       struct ast_udc_request,
					       queue);
	} else {
		/* Check for short packet */
		if (total_len < ep->ep.maxpacket) {
			ast_udc_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_udc_request,
						       queue);
		}
	}

	/* More requests & dma descs not setup yet */
	if (req && (req->actual_dma_length == req->req.actual)) {
		EP_DBG(ep, "More requests\n");
		ast_udc_epn_kick_desc(ep, req);
	}
}
  782. static void ast_udc_ep0_data_tx(struct ast_udc_dev *udc, u8 *tx_data, u32 len)
  783. {
  784. if (len) {
  785. memcpy(udc->ep0_buf, tx_data, len);
  786. ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
  787. ast_udc_write(udc, EP0_TX_LEN(len), AST_UDC_EP0_CTRL);
  788. ast_udc_write(udc, EP0_TX_LEN(len) | EP0_TX_BUFF_RDY,
  789. AST_UDC_EP0_CTRL);
  790. udc->is_control_tx = 1;
  791. } else
  792. ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
  793. }
  794. static void ast_udc_getstatus(struct ast_udc_dev *udc)
  795. {
  796. struct usb_ctrlrequest crq;
  797. struct ast_udc_ep *ep;
  798. u16 status = 0;
  799. u16 epnum = 0;
  800. memcpy_fromio(&crq, udc->creq, sizeof(crq));
  801. switch (crq.bRequestType & USB_RECIP_MASK) {
  802. case USB_RECIP_DEVICE:
  803. /* Get device status */
  804. status = 1 << USB_DEVICE_SELF_POWERED;
  805. break;
  806. case USB_RECIP_INTERFACE:
  807. break;
  808. case USB_RECIP_ENDPOINT:
  809. epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
  810. status = udc->ep[epnum].stopped;
  811. break;
  812. default:
  813. goto stall;
  814. }
  815. ep = &udc->ep[epnum];
  816. EP_DBG(ep, "status: 0x%x\n", status);
  817. ast_udc_ep0_data_tx(udc, (u8 *)&status, sizeof(status));
  818. return;
  819. stall:
  820. EP_DBG(ep, "Can't respond request\n");
  821. ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
  822. AST_UDC_EP0_CTRL);
  823. }
/*
 * Handle an EP0 SETUP packet: flush any stale ep0 requests, handle the
 * standard requests the controller must service itself (SET_ADDRESS,
 * GET_STATUS), and forward everything else to the gadget driver.
 * Called with udc->lock held; the lock is dropped around driver->setup().
 */
static void ast_udc_ep0_handle_setup(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;
	struct usb_ctrlrequest crq;
	int req_num = 0;
	int rc = 0;
	u32 reg;

	memcpy_fromio(&crq, udc->creq, sizeof(crq));

	SETUP_DBG(udc, "SETUP packet: %02x/%02x/%04x/%04x/%04x\n",
		  crq.bRequestType, crq.bRequest, le16_to_cpu(crq.wValue),
		  le16_to_cpu(crq.wIndex), le16_to_cpu(crq.wLength));

	/*
	 * Cleanup ep0 request(s) in queue because
	 * there is a new control setup comes.
	 */
	list_for_each_entry(req, &udc->ep[0].queue, queue) {
		req_num++;
		EP_DBG(ep, "there is req %p in ep0 queue !\n", req);
	}

	if (req_num)
		ast_udc_nuke(&udc->ep[0], -ETIMEDOUT);

	/* Record the data-stage direction for the EP0 state machine */
	udc->ep[0].dir_in = crq.bRequestType & USB_DIR_IN;

	if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (crq.bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* First chance to latch the negotiated bus speed */
			if (ast_udc_read(udc, AST_UDC_STS) & UDC_STS_HIGHSPEED)
				udc->gadget.speed = USB_SPEED_HIGH;
			else
				udc->gadget.speed = USB_SPEED_FULL;

			SETUP_DBG(udc, "set addr: 0x%x\n", crq.wValue);
			reg = ast_udc_read(udc, AST_UDC_CONFIG);
			reg &= ~UDC_CFG_ADDR_MASK;
			reg |= UDC_CFG_SET_ADDR(crq.wValue);
			ast_udc_write(udc, reg, AST_UDC_CONFIG);
			goto req_complete;

		case USB_REQ_CLEAR_FEATURE:
			SETUP_DBG(udc, "ep0: CLEAR FEATURE\n");
			goto req_driver;

		case USB_REQ_SET_FEATURE:
			SETUP_DBG(udc, "ep0: SET FEATURE\n");
			goto req_driver;

		case USB_REQ_GET_STATUS:
			ast_udc_getstatus(udc);
			return;

		default:
			goto req_driver;
		}
	}

req_driver:
	if (udc->driver) {
		SETUP_DBG(udc, "Forwarding %s to gadget...\n",
			  udc->gadget.name);
		/* driver->setup() may sleep/queue; call it unlocked */
		spin_unlock(&udc->lock);
		rc = udc->driver->setup(&udc->gadget, &crq);
		spin_lock(&udc->lock);
	} else {
		SETUP_DBG(udc, "No gadget for request !\n");
	}

	if (rc >= 0)
		return;

	/* Stall if gadget failed */
	SETUP_DBG(udc, "Stalling, rc:0x%x\n", rc);
	ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
		      AST_UDC_EP0_CTRL);

	return;

req_complete:
	SETUP_DBG(udc, "ep0: Sending IN status without data\n");
	ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
}
/*
 * Top-level interrupt handler: acknowledge the global and per-EP interrupt
 * status, then dispatch bus events (reset/suspend/resume), EP0 stages, and
 * generic endpoint completions. Driver callbacks are invoked with the lock
 * temporarily dropped.
 */
static irqreturn_t ast_udc_isr(int irq, void *data)
{
	struct ast_udc_dev *udc = (struct ast_udc_dev *)data;
	struct ast_udc_ep *ep;
	u32 isr, ep_isr;
	int i;

	spin_lock(&udc->lock);

	isr = ast_udc_read(udc, AST_UDC_ISR);
	if (!isr)
		goto done;

	/* Ack interrupts */
	ast_udc_write(udc, isr, AST_UDC_ISR);

	if (isr & UDC_IRQ_BUS_RESET) {
		ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n");
		udc->gadget.speed = USB_SPEED_UNKNOWN;

		ep = &udc->ep[1];
		EP_DBG(ep, "dctrl:0x%x\n",
		       ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));

		if (udc->driver && udc->driver->reset) {
			/* Callback may sleep/re-enter; drop the lock */
			spin_unlock(&udc->lock);
			udc->driver->reset(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_BUS_SUSPEND) {
		ISR_DBG(udc, "UDC_IRQ_BUS_SUSPEND\n");
		/* Remember the pre-suspend state so resume can restore it */
		udc->suspended_from = udc->gadget.state;
		usb_gadget_set_state(&udc->gadget, USB_STATE_SUSPENDED);

		if (udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_BUS_RESUME) {
		ISR_DBG(udc, "UDC_IRQ_BUS_RESUME\n");
		usb_gadget_set_state(&udc->gadget, udc->suspended_from);

		if (udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_EP0_IN_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP0_IN_ACK_STALL\n");
		ast_udc_ep0_in(udc);
	}

	if (isr & UDC_IRQ_EP0_OUT_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP0_OUT_ACK_STALL\n");
		ast_udc_ep0_out(udc);
	}

	if (isr & UDC_IRQ_EP0_SETUP) {
		ISR_DBG(udc, "UDC_IRQ_EP0_SETUP\n");
		ast_udc_ep0_handle_setup(udc);
	}

	if (isr & UDC_IRQ_EP_POOL_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP_POOL_ACK_STALL\n");
		ep_isr = ast_udc_read(udc, AST_UDC_EP_ACK_ISR);

		/* Ack EP interrupts */
		ast_udc_write(udc, ep_isr, AST_UDC_EP_ACK_ISR);

		/* Handle each EP (bit i maps to endpoint i + 1; EP0 is
		 * handled by the dedicated EP0 interrupt bits above)
		 */
		for (i = 0; i < AST_UDC_NUM_ENDPOINTS - 1; i++) {
			if (ep_isr & (0x1 << i)) {
				ep = &udc->ep[i + 1];
				if (ep->desc_mode)
					ast_udc_epn_handle_desc(udc, i + 1);
				else
					ast_udc_epn_handle(udc, i + 1);
			}
		}
	}

done:
	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}
  969. static int ast_udc_gadget_getframe(struct usb_gadget *gadget)
  970. {
  971. struct ast_udc_dev *udc = to_ast_dev(gadget);
  972. return (ast_udc_read(udc, AST_UDC_STS) >> 16) & 0x7ff;
  973. }
  974. static void ast_udc_wake_work(struct work_struct *work)
  975. {
  976. struct ast_udc_dev *udc = container_of(work, struct ast_udc_dev,
  977. wake_work);
  978. unsigned long flags;
  979. u32 ctrl;
  980. spin_lock_irqsave(&udc->lock, flags);
  981. UDC_DBG(udc, "Wakeup Host !\n");
  982. ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL);
  983. ast_udc_write(udc, ctrl | USB_REMOTE_WAKEUP_EN, AST_UDC_FUNC_CTRL);
  984. spin_unlock_irqrestore(&udc->lock, flags);
  985. }
  986. static void ast_udc_wakeup_all(struct ast_udc_dev *udc)
  987. {
  988. /*
  989. * A device is trying to wake the world, because this
  990. * can recurse into the device, we break the call chain
  991. * using a work queue
  992. */
  993. schedule_work(&udc->wake_work);
  994. }
  995. static int ast_udc_wakeup(struct usb_gadget *gadget)
  996. {
  997. struct ast_udc_dev *udc = to_ast_dev(gadget);
  998. unsigned long flags;
  999. int rc = 0;
  1000. spin_lock_irqsave(&udc->lock, flags);
  1001. if (!udc->wakeup_en) {
  1002. UDC_DBG(udc, "Remote Wakeup is disabled\n");
  1003. rc = -EINVAL;
  1004. goto err;
  1005. }
  1006. UDC_DBG(udc, "Device initiated wakeup\n");
  1007. ast_udc_wakeup_all(udc);
  1008. err:
  1009. spin_unlock_irqrestore(&udc->lock, flags);
  1010. return rc;
  1011. }
  1012. /*
  1013. * Activate/Deactivate link with host
  1014. */
  1015. static int ast_udc_pullup(struct usb_gadget *gadget, int is_on)
  1016. {
  1017. struct ast_udc_dev *udc = to_ast_dev(gadget);
  1018. unsigned long flags;
  1019. u32 ctrl;
  1020. spin_lock_irqsave(&udc->lock, flags);
  1021. UDC_DBG(udc, "is_on: %d\n", is_on);
  1022. if (is_on)
  1023. ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) | USB_UPSTREAM_EN;
  1024. else
  1025. ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
  1026. ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
  1027. spin_unlock_irqrestore(&udc->lock, flags);
  1028. return 0;
  1029. }
  1030. static int ast_udc_start(struct usb_gadget *gadget,
  1031. struct usb_gadget_driver *driver)
  1032. {
  1033. struct ast_udc_dev *udc = to_ast_dev(gadget);
  1034. struct ast_udc_ep *ep;
  1035. unsigned long flags;
  1036. int i;
  1037. spin_lock_irqsave(&udc->lock, flags);
  1038. UDC_DBG(udc, "\n");
  1039. udc->driver = driver;
  1040. udc->gadget.dev.of_node = udc->pdev->dev.of_node;
  1041. for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
  1042. ep = &udc->ep[i];
  1043. ep->stopped = 0;
  1044. }
  1045. spin_unlock_irqrestore(&udc->lock, flags);
  1046. return 0;
  1047. }
  1048. static int ast_udc_stop(struct usb_gadget *gadget)
  1049. {
  1050. struct ast_udc_dev *udc = to_ast_dev(gadget);
  1051. unsigned long flags;
  1052. u32 ctrl;
  1053. spin_lock_irqsave(&udc->lock, flags);
  1054. UDC_DBG(udc, "\n");
  1055. ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
  1056. ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
  1057. udc->gadget.speed = USB_SPEED_UNKNOWN;
  1058. udc->driver = NULL;
  1059. ast_udc_stop_activity(udc);
  1060. usb_gadget_set_state(&udc->gadget, USB_STATE_NOTATTACHED);
  1061. spin_unlock_irqrestore(&udc->lock, flags);
  1062. return 0;
  1063. }
/* Gadget operations exposed to the UDC core. */
static const struct usb_gadget_ops ast_udc_ops = {
	.get_frame	= ast_udc_gadget_getframe,
	.wakeup		= ast_udc_wakeup,
	.pullup		= ast_udc_pullup,
	.udc_start	= ast_udc_start,
	.udc_stop	= ast_udc_stop,
};
  1071. /*
  1072. * Support 1 Control Endpoint.
  1073. * Support multiple programmable endpoints that can be configured to
  1074. * Bulk IN/OUT, Interrupt IN/OUT, and Isochronous IN/OUT type endpoint.
  1075. */
  1076. static void ast_udc_init_ep(struct ast_udc_dev *udc)
  1077. {
  1078. struct ast_udc_ep *ep;
  1079. int i;
  1080. for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
  1081. ep = &udc->ep[i];
  1082. ep->ep.name = ast_ep_name[i];
  1083. if (i == 0) {
  1084. ep->ep.caps.type_control = true;
  1085. } else {
  1086. ep->ep.caps.type_iso = true;
  1087. ep->ep.caps.type_bulk = true;
  1088. ep->ep.caps.type_int = true;
  1089. }
  1090. ep->ep.caps.dir_in = true;
  1091. ep->ep.caps.dir_out = true;
  1092. ep->ep.ops = &ast_udc_ep_ops;
  1093. ep->udc = udc;
  1094. INIT_LIST_HEAD(&ep->queue);
  1095. if (i == 0) {
  1096. usb_ep_set_maxpacket_limit(&ep->ep,
  1097. AST_UDC_EP0_MAX_PACKET);
  1098. continue;
  1099. }
  1100. ep->ep_reg = udc->reg + AST_UDC_EP_BASE +
  1101. (AST_UDC_EP_OFFSET * (i - 1));
  1102. ep->epn_buf = udc->ep0_buf + (i * AST_UDC_EP_DMA_SIZE);
  1103. ep->epn_buf_dma = udc->ep0_buf_dma + (i * AST_UDC_EP_DMA_SIZE);
  1104. usb_ep_set_maxpacket_limit(&ep->ep, AST_UDC_EPn_MAX_PACKET);
  1105. ep->descs = ep->epn_buf + AST_UDC_EPn_MAX_PACKET;
  1106. ep->descs_dma = ep->epn_buf_dma + AST_UDC_EPn_MAX_PACKET;
  1107. ep->descs_wptr = 0;
  1108. list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
  1109. }
  1110. }
/* One-time software init of the device structure (remote-wakeup worker). */
static void ast_udc_init_dev(struct ast_udc_dev *udc)
{
	INIT_WORK(&udc->wake_work, ast_udc_wake_work);
}
/*
 * Bring the controller out of reset and program the interrupt masks.
 * Order matters: PHY first, then device reset release, then IRQ setup.
 */
static void ast_udc_init_hw(struct ast_udc_dev *udc)
{
	u32 ctrl;

	/* Enable PHY */
	ctrl = USB_PHY_CLK_EN | USB_PHY_RESET_DIS;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	udelay(1);
	ast_udc_write(udc, 0, AST_UDC_DEV_RESET);

	/* Set descriptor ring size (256-entry rings need the long-desc bit) */
	if (AST_UDC_DESCS_COUNT == 256) {
		ctrl |= USB_EP_LONG_DESC;
		ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
	}

	/* Mask & ack all interrupts before installing the handler */
	ast_udc_write(udc, 0, AST_UDC_IER);
	ast_udc_write(udc, UDC_IRQ_ACK_ALL, AST_UDC_ISR);

	/* Enable some interrupts */
	ctrl = UDC_IRQ_EP_POOL_ACK_STALL | UDC_IRQ_BUS_RESUME |
	       UDC_IRQ_BUS_SUSPEND | UDC_IRQ_BUS_RESET |
	       UDC_IRQ_EP0_IN_ACK_STALL | UDC_IRQ_EP0_OUT_ACK_STALL |
	       UDC_IRQ_EP0_SETUP;
	ast_udc_write(udc, ctrl, AST_UDC_IER);

	/* Cleanup and enable ep ACK interrupts */
	ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_IER);
	ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_ISR);

	ast_udc_write(udc, 0, AST_UDC_EP0_CTRL);
}
  1142. static int ast_udc_remove(struct platform_device *pdev)
  1143. {
  1144. struct ast_udc_dev *udc = platform_get_drvdata(pdev);
  1145. unsigned long flags;
  1146. u32 ctrl;
  1147. usb_del_gadget_udc(&udc->gadget);
  1148. if (udc->driver)
  1149. return -EBUSY;
  1150. spin_lock_irqsave(&udc->lock, flags);
  1151. /* Disable upstream port connection */
  1152. ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
  1153. ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
  1154. clk_disable_unprepare(udc->clk);
  1155. spin_unlock_irqrestore(&udc->lock, flags);
  1156. if (udc->ep0_buf)
  1157. dma_free_coherent(&pdev->dev,
  1158. AST_UDC_EP_DMA_SIZE * AST_UDC_NUM_ENDPOINTS,
  1159. udc->ep0_buf,
  1160. udc->ep0_buf_dma);
  1161. udc->ep0_buf = NULL;
  1162. return 0;
  1163. }
  1164. static int ast_udc_probe(struct platform_device *pdev)
  1165. {
  1166. enum usb_device_speed max_speed;
  1167. struct device *dev = &pdev->dev;
  1168. struct ast_udc_dev *udc;
  1169. struct resource *res;
  1170. int rc;
  1171. udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL);
  1172. if (!udc)
  1173. return -ENOMEM;
  1174. udc->gadget.dev.parent = dev;
  1175. udc->pdev = pdev;
  1176. spin_lock_init(&udc->lock);
  1177. udc->gadget.ops = &ast_udc_ops;
  1178. udc->gadget.ep0 = &udc->ep[0].ep;
  1179. udc->gadget.name = "aspeed-udc";
  1180. udc->gadget.dev.init_name = "gadget";
  1181. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1182. udc->reg = devm_ioremap_resource(&pdev->dev, res);
  1183. if (IS_ERR(udc->reg)) {
  1184. dev_err(&pdev->dev, "Failed to map resources\n");
  1185. return PTR_ERR(udc->reg);
  1186. }
  1187. platform_set_drvdata(pdev, udc);
  1188. udc->clk = devm_clk_get(&pdev->dev, NULL);
  1189. if (IS_ERR(udc->clk)) {
  1190. rc = PTR_ERR(udc->clk);
  1191. goto err;
  1192. }
  1193. rc = clk_prepare_enable(udc->clk);
  1194. if (rc) {
  1195. dev_err(&pdev->dev, "Failed to enable clock (0x%x)\n", rc);
  1196. goto err;
  1197. }
  1198. /* Check if we need to limit the HW to USB1 */
  1199. max_speed = usb_get_maximum_speed(&pdev->dev);
  1200. if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
  1201. udc->force_usb1 = true;
  1202. /*
  1203. * Allocate DMA buffers for all EPs in one chunk
  1204. */
  1205. udc->ep0_buf = dma_alloc_coherent(&pdev->dev,
  1206. AST_UDC_EP_DMA_SIZE *
  1207. AST_UDC_NUM_ENDPOINTS,
  1208. &udc->ep0_buf_dma, GFP_KERNEL);
  1209. udc->gadget.speed = USB_SPEED_UNKNOWN;
  1210. udc->gadget.max_speed = USB_SPEED_HIGH;
  1211. udc->creq = udc->reg + AST_UDC_SETUP0;
  1212. /*
  1213. * Support single stage mode or 32/256 stages descriptor mode.
  1214. * Set default as Descriptor Mode.
  1215. */
  1216. udc->desc_mode = AST_UDC_DESC_MODE;
  1217. dev_info(&pdev->dev, "DMA %s\n", udc->desc_mode ?
  1218. "descriptor mode" : "single mode");
  1219. INIT_LIST_HEAD(&udc->gadget.ep_list);
  1220. INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
  1221. /* Initialized udc ep */
  1222. ast_udc_init_ep(udc);
  1223. /* Initialized udc device */
  1224. ast_udc_init_dev(udc);
  1225. /* Initialized udc hardware */
  1226. ast_udc_init_hw(udc);
  1227. /* Find interrupt and install handler */
  1228. udc->irq = platform_get_irq(pdev, 0);
  1229. if (udc->irq < 0) {
  1230. rc = udc->irq;
  1231. goto err;
  1232. }
  1233. rc = devm_request_irq(&pdev->dev, udc->irq, ast_udc_isr, 0,
  1234. KBUILD_MODNAME, udc);
  1235. if (rc) {
  1236. dev_err(&pdev->dev, "Failed to request interrupt\n");
  1237. goto err;
  1238. }
  1239. rc = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
  1240. if (rc) {
  1241. dev_err(&pdev->dev, "Failed to add gadget udc\n");
  1242. goto err;
  1243. }
  1244. dev_info(&pdev->dev, "Initialized udc in USB%s mode\n",
  1245. udc->force_usb1 ? "1" : "2");
  1246. return 0;
  1247. err:
  1248. dev_err(&pdev->dev, "Failed to udc probe, rc:0x%x\n", rc);
  1249. ast_udc_remove(pdev);
  1250. return rc;
  1251. }
/* Device-tree match table: AST2600 only. */
static const struct of_device_id ast_udc_of_dt_ids[] = {
	{ .compatible = "aspeed,ast2600-udc", },
	{}
};

MODULE_DEVICE_TABLE(of, ast_udc_of_dt_ids);
/* Platform driver glue and module metadata. */
static struct platform_driver ast_udc_driver = {
	.probe			= ast_udc_probe,
	.remove			= ast_udc_remove,
	.driver			= {
		.name			= KBUILD_MODNAME,
		.of_match_table		= ast_udc_of_dt_ids,
	},
};

module_platform_driver(ast_udc_driver);

MODULE_DESCRIPTION("ASPEED UDC driver");
MODULE_AUTHOR("Neal Liu <[email protected]>");
MODULE_LICENSE("GPL");