mtu3_gadget_ep0.c

// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling
 *
 * Copyright (c) 2016 MediaTek Inc.
 *
 * Author: Chunfeng.Yun <[email protected]>
 */

#include <linux/iopoll.h>
#include <linux/usb/composite.h>

#include "mtu3.h"
#include "mtu3_debug.h"
#include "mtu3_trace.h"

/* ep0 is always mtu3->in_eps[0] */
#define next_ep0_request(mtu)	next_request((mtu)->ep0)

/* for high speed test mode; see USB 2.0 spec 7.1.20 */
static const u8 mtu3_test_packet[53] = {
	/* implicit SYNC then DATA0 to start */
	/* JKJKJKJK x9 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* JJKKJJKK x8 */
	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
	/* JJJJKKKK x8 */
	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
	/* JJJJJJJKKKKKKK x8 */
	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	/* JJJJJJJK x8 */
	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
	/* JKKKKKKK x10, JK */
	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e,
	/* implicit CRC16 then EOP to end */
};

static char *decode_ep0_state(struct mtu3 *mtu)
{
	switch (mtu->ep0_state) {
	case MU3D_EP0_STATE_SETUP:
		return "SETUP";
	case MU3D_EP0_STATE_TX:
		return "IN";
	case MU3D_EP0_STATE_RX:
		return "OUT";
	case MU3D_EP0_STATE_TX_END:
		return "TX-END";
	case MU3D_EP0_STATE_STALL:
		return "STALL";
	default:
		return "??";
	}
}

static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req)
{
	mtu3_req_complete(mtu->ep0, req, 0);
}

static int
forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
__releases(mtu->lock)
__acquires(mtu->lock)
{
	int ret;

	if (!mtu->gadget_driver || !mtu->async_callbacks)
		return -EOPNOTSUPP;

	spin_unlock(&mtu->lock);
	ret = mtu->gadget_driver->setup(&mtu->g, setup);
	spin_lock(&mtu->lock);

	dev_dbg(mtu->dev, "%s ret %d\n", __func__, ret);
	return ret;
}

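/*
 * PIO write of an ep0 IN payload: copy 32-bit words first, then the
 * remaining 16-bit and 8-bit tail bytes, into the ep0 FIFO.
 */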
static void ep0_write_fifo(struct mtu3_ep *mep, const u8 *src, u16 len)
{
	void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
	u16 index = 0;

	dev_dbg(mep->mtu->dev, "%s: ep%din, len=%d, buf=%p\n",
		__func__, mep->epnum, len, src);

	if (len >= 4) {
		iowrite32_rep(fifo, src, len >> 2);
		index = len & ~0x03;
	}
	if (len & 0x02) {
		writew(*(u16 *)&src[index], fifo);
		index += 2;
	}
	if (len & 0x01)
		writeb(src[index], fifo);
}

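/*
 * PIO read of an ep0 OUT payload: copy 32-bit words first, then pick the
 * remaining 1~3 tail bytes out of one final 32-bit FIFO read.
 */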
static void ep0_read_fifo(struct mtu3_ep *mep, u8 *dst, u16 len)
{
	void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
	u32 value;
	u16 index = 0;

	dev_dbg(mep->mtu->dev, "%s: ep%dout len=%d buf=%p\n",
		__func__, mep->epnum, len, dst);

	if (len >= 4) {
		ioread32_rep(fifo, dst, len >> 2);
		index = len & ~0x03;
	}
	if (len & 0x3) {
		value = readl(fifo);
		memcpy(&dst[index], &value, len & 0x3);
	}
}

static void ep0_load_test_packet(struct mtu3 *mtu)
{
	/*
	 * the test packet is shorter than the max packet size of HS ep0,
	 * so write it into the fifo directly.
	 */
	ep0_write_fifo(mtu->ep0, mtu3_test_packet, sizeof(mtu3_test_packet));
}

/*
 * A. send STALL for setup transfer without data stage:
 *	set SENDSTALL and SETUPPKTRDY at the same time;
 * B. send STALL for other cases:
 *	set SENDSTALL only.
 */
static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
{
	struct mtu3 *mtu = mep0->mtu;
	void __iomem *mbase = mtu->mac_base;
	u32 csr;

	/* EP0_SENTSTALL is W1C */
	csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
	if (set)
		csr |= EP0_SENDSTALL | pktrdy;
	else
		csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL;
	mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);

	mtu->delayed_status = false;
	mtu->ep0_state = MU3D_EP0_STATE_SETUP;

	dev_dbg(mtu->dev, "ep0: %s STALL, ep0_state: %s\n",
		set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
}

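/* acknowledge the SETUP packet and end the data stage to start STATUS */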
static void ep0_do_status_stage(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 value;

	value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
	mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
}

static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);

static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
{}

static void ep0_set_sel_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct mtu3_request *mreq;
	struct mtu3 *mtu;
	struct usb_set_sel_req sel;

	memcpy(&sel, req->buf, sizeof(sel));

	mreq = to_mtu3_request(req);
	mtu = mreq->mtu;

	dev_dbg(mtu->dev, "u1sel:%d, u1pel:%d, u2sel:%d, u2pel:%d\n",
		sel.u1_sel, sel.u1_pel, sel.u2_sel, sel.u2_pel);
}

/* queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
{
	int ret;
	u16 length = le16_to_cpu(setup->wLength);

	if (unlikely(length != 6)) {
		dev_err(mtu->dev, "%s wrong wLength:%d\n",
			__func__, length);
		return -EINVAL;
	}

	mtu->ep0_req.mep = mtu->ep0;
	mtu->ep0_req.request.length = 6;
	mtu->ep0_req.request.buf = mtu->setup_buf;
	mtu->ep0_req.request.complete = ep0_set_sel_complete;
	ret = ep0_queue(mtu->ep0, &mtu->ep0_req);

	return ret < 0 ? ret : 1;
}

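/*
 * build the 2-byte GET_STATUS response for device/endpoint recipients and
 * queue it as the data stage; interface/class/vendor requests are delegated.
 */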
static int
ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
{
	struct mtu3_ep *mep = NULL;
	int handled = 1;
	u8 result[2] = {0, 0};
	u8 epnum = 0;
	int is_in;

	switch (setup->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED;
		result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;

		if (mtu->g.speed >= USB_SPEED_SUPER) {
			result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED;
			result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED;
		}

		dev_dbg(mtu->dev, "%s result=%x, U1=%x, U2=%x\n", __func__,
			result[0], mtu->u1_enable, mtu->u2_enable);

		break;
	case USB_RECIP_INTERFACE:
		/* status of function remote wakeup, forward request */
		handled = 0;
		break;
	case USB_RECIP_ENDPOINT:
		epnum = (u8) le16_to_cpu(setup->wIndex);
		is_in = epnum & USB_DIR_IN;
		epnum &= USB_ENDPOINT_NUMBER_MASK;

		if (epnum >= mtu->num_eps) {
			handled = -EINVAL;
			break;
		}
		if (!epnum)
			break;

		mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
		if (!mep->desc) {
			handled = -EINVAL;
			break;
		}
		if (mep->flags & MTU3_EP_STALL)
			result[0] |= 1 << USB_ENDPOINT_HALT;

		break;
	default:
		/* class, vendor, etc ... delegate */
		handled = 0;
		break;
	}

	if (handled > 0) {
		int ret;

		/* prepare a data stage for GET_STATUS */
		dev_dbg(mtu->dev, "get_status=%x\n", *(u16 *)result);
		memcpy(mtu->setup_buf, result, sizeof(result));
		mtu->ep0_req.mep = mtu->ep0;
		mtu->ep0_req.request.length = 2;
		mtu->ep0_req.request.buf = &mtu->setup_buf;
		mtu->ep0_req.request.complete = ep0_dummy_complete;
		ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
		if (ret < 0)
			handled = ret;
	}

	return handled;
}

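/*
 * SET_FEATURE(TEST_MODE): latch the selector, preload the test packet for
 * TEST_PACKET, finish the status stage, then write the test mode register.
 */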
static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
{
	void __iomem *mbase = mtu->mac_base;
	int handled = 1;
	u32 value;

	switch (le16_to_cpu(setup->wIndex) >> 8) {
	case USB_TEST_J:
		dev_dbg(mtu->dev, "USB_TEST_J\n");
		mtu->test_mode_nr = TEST_J_MODE;
		break;
	case USB_TEST_K:
		dev_dbg(mtu->dev, "USB_TEST_K\n");
		mtu->test_mode_nr = TEST_K_MODE;
		break;
	case USB_TEST_SE0_NAK:
		dev_dbg(mtu->dev, "USB_TEST_SE0_NAK\n");
		mtu->test_mode_nr = TEST_SE0_NAK_MODE;
		break;
	case USB_TEST_PACKET:
		dev_dbg(mtu->dev, "USB_TEST_PACKET\n");
		mtu->test_mode_nr = TEST_PACKET_MODE;
		break;
	default:
		handled = -EINVAL;
		goto out;
	}

	mtu->test_mode = true;

	/* no TX completion interrupt; the platform needs a restart after the test */
	if (mtu->test_mode_nr == TEST_PACKET_MODE)
		ep0_load_test_packet(mtu);

	/* send status before entering test mode. */
	ep0_do_status_stage(mtu);

	/* wait for ACK status sent by host */
	readl_poll_timeout_atomic(mbase + U3D_EP0CSR, value,
			!(value & EP0_DATAEND), 100, 5000);

	mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr);

	mtu->ep0_state = MU3D_EP0_STATE_SETUP;

out:
	return handled;
}

static int ep0_handle_feature_dev(struct mtu3 *mtu,
		struct usb_ctrlrequest *setup, bool set)
{
	void __iomem *mbase = mtu->mac_base;
	int handled = -EINVAL;
	u32 lpc;

	switch (le16_to_cpu(setup->wValue)) {
	case USB_DEVICE_REMOTE_WAKEUP:
		mtu->may_wakeup = !!set;
		handled = 1;
		break;
	case USB_DEVICE_TEST_MODE:
		if (!set || (mtu->g.speed != USB_SPEED_HIGH) ||
			(le16_to_cpu(setup->wIndex) & 0xff))
			break;

		handled = handle_test_mode(mtu, setup);
		break;
	case USB_DEVICE_U1_ENABLE:
		if (mtu->g.speed < USB_SPEED_SUPER ||
			mtu->g.state != USB_STATE_CONFIGURED)
			break;

		lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
		if (set)
			lpc |= SW_U1_REQUEST_ENABLE;
		else
			lpc &= ~SW_U1_REQUEST_ENABLE;
		mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);

		mtu->u1_enable = !!set;
		handled = 1;
		break;
	case USB_DEVICE_U2_ENABLE:
		if (mtu->g.speed < USB_SPEED_SUPER ||
			mtu->g.state != USB_STATE_CONFIGURED)
			break;

		lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
		if (set)
			lpc |= SW_U2_REQUEST_ENABLE;
		else
			lpc &= ~SW_U2_REQUEST_ENABLE;
		mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);

		mtu->u2_enable = !!set;
		handled = 1;
		break;
	default:
		handled = -EINVAL;
		break;
	}

	return handled;
}

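/* dispatch SET/CLEAR_FEATURE by recipient: device, interface or endpoint */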
static int ep0_handle_feature(struct mtu3 *mtu,
		struct usb_ctrlrequest *setup, bool set)
{
	struct mtu3_ep *mep;
	int handled = -EINVAL;
	int is_in;
	u16 value;
	u16 index;
	u8 epnum;

	value = le16_to_cpu(setup->wValue);
	index = le16_to_cpu(setup->wIndex);

	switch (setup->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		handled = ep0_handle_feature_dev(mtu, setup, set);
		break;
	case USB_RECIP_INTERFACE:
		/* superspeed only */
		if (value == USB_INTRF_FUNC_SUSPEND &&
		    mtu->g.speed >= USB_SPEED_SUPER) {
			/* forward the request for function suspend */
			mtu->may_wakeup = !!(index & USB_INTRF_FUNC_SUSPEND_RW);
			handled = 0;
		}
		break;
	case USB_RECIP_ENDPOINT:
		epnum = index & USB_ENDPOINT_NUMBER_MASK;
		if (epnum == 0 || epnum >= mtu->num_eps ||
		    value != USB_ENDPOINT_HALT)
			break;

		is_in = index & USB_DIR_IN;
		mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
		if (!mep->desc)
			break;

		handled = 1;
		/* ignore request if endpoint is wedged */
		if (mep->flags & MTU3_EP_WEDGE)
			break;

		mtu3_ep_stall_set(mep, set);
		break;
	default:
		/* class, vendor, etc ... delegate */
		handled = 0;
		break;
	}

	return handled;
}

/*
 * handle all control requests that can be handled here
 * returns:
 *	negative errno - error happened
 *	zero - need to delegate SETUP to the gadget driver
 *	positive - already handled
 */
static int handle_standard_request(struct mtu3 *mtu,
			  struct usb_ctrlrequest *setup)
{
	void __iomem *mbase = mtu->mac_base;
	enum usb_device_state state = mtu->g.state;
	int handled = -EINVAL;
	u32 dev_conf;
	u16 value;

	value = le16_to_cpu(setup->wValue);

	/* the gadget driver handles everything except what we must handle */
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		/* change it after the status stage */
		mtu->address = (u8) (value & 0x7f);
		dev_dbg(mtu->dev, "set address to 0x%x\n", mtu->address);

		dev_conf = mtu3_readl(mbase, U3D_DEVICE_CONF);
		dev_conf &= ~DEV_ADDR_MSK;
		dev_conf |= DEV_ADDR(mtu->address);
		mtu3_writel(mbase, U3D_DEVICE_CONF, dev_conf);

		if (mtu->address)
			usb_gadget_set_state(&mtu->g, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&mtu->g, USB_STATE_DEFAULT);

		handled = 1;
		break;
	case USB_REQ_SET_CONFIGURATION:
		if (state == USB_STATE_ADDRESS) {
			usb_gadget_set_state(&mtu->g,
					USB_STATE_CONFIGURED);
		} else if (state == USB_STATE_CONFIGURED) {
			/*
			 * USB2 spec sec 9.4.7, if wValue is 0 then dev
			 * is moved to addressed state
			 */
			if (!value)
				usb_gadget_set_state(&mtu->g,
						USB_STATE_ADDRESS);
		}
		handled = 0;
		break;
	case USB_REQ_CLEAR_FEATURE:
		handled = ep0_handle_feature(mtu, setup, 0);
		break;
	case USB_REQ_SET_FEATURE:
		handled = ep0_handle_feature(mtu, setup, 1);
		break;
	case USB_REQ_GET_STATUS:
		handled = ep0_get_status(mtu, setup);
		break;
	case USB_REQ_SET_SEL:
		handled = ep0_set_sel(mtu, setup);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		handled = 1;
		break;
	default:
		/* delegate SET_CONFIGURATION, etc */
		handled = 0;
	}

	return handled;
}

/* receive a data packet (OUT) */
static void ep0_rx_state(struct mtu3 *mtu)
{
	struct mtu3_request *mreq;
	struct usb_request *req;
	void __iomem *mbase = mtu->mac_base;
	u32 maxp;
	u32 csr;
	u16 count = 0;

	dev_dbg(mtu->dev, "%s\n", __func__);

	csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
	mreq = next_ep0_request(mtu);
	req = &mreq->request;

	/* read packet and ack; or stall because of gadget driver bug */
	if (req) {
		void *buf = req->buf + req->actual;
		unsigned int len = req->length - req->actual;

		/* read the buffer */
		count = mtu3_readl(mbase, U3D_RXCOUNT0);
		if (count > len) {
			req->status = -EOVERFLOW;
			count = len;
		}
		ep0_read_fifo(mtu->ep0, buf, count);
		req->actual += count;
		csr |= EP0_RXPKTRDY;

		maxp = mtu->g.ep0->maxpacket;
		if (count < maxp || req->actual == req->length) {
			mtu->ep0_state = MU3D_EP0_STATE_SETUP;
			dev_dbg(mtu->dev, "ep0 state: %s\n",
				decode_ep0_state(mtu));

			csr |= EP0_DATAEND;
		} else {
			req = NULL;
		}
	} else {
		csr |= EP0_RXPKTRDY | EP0_SENDSTALL;
		dev_dbg(mtu->dev, "%s: SENDSTALL\n", __func__);
	}

	mtu3_writel(mbase, U3D_EP0CSR, csr);

	/* give back the request if all data has been received */
	if (req)
		ep0_req_giveback(mtu, req);
}

/* transmitting to the host (IN) */
static void ep0_tx_state(struct mtu3 *mtu)
{
	struct mtu3_request *mreq = next_ep0_request(mtu);
	struct usb_request *req;
	u32 csr;
	u8 *src;
	u32 count;
	u32 maxp;

	dev_dbg(mtu->dev, "%s\n", __func__);

	if (!mreq)
		return;

	maxp = mtu->g.ep0->maxpacket;
	req = &mreq->request;

	/* load the data */
	src = (u8 *)req->buf + req->actual;
	count = min(maxp, req->length - req->actual);
	if (count)
		ep0_write_fifo(mtu->ep0, src, count);

	dev_dbg(mtu->dev, "%s act=%d, len=%d, cnt=%d, maxp=%d zero=%d\n",
		__func__, req->actual, req->length, count, maxp, req->zero);

	req->actual += count;

	if ((count < maxp)
		|| ((req->actual == req->length) && !req->zero))
		mtu->ep0_state = MU3D_EP0_STATE_TX_END;

	/* send it out, triggering a "txpktrdy cleared" irq */
	csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
	mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr | EP0_TXPKTRDY);

	dev_dbg(mtu->dev, "%s ep0csr=0x%x\n", __func__,
		mtu3_readl(mtu->mac_base, U3D_EP0CSR));
}

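/*
 * read the 8-byte SETUP packet from the FIFO, give back any leftover ep0
 * request, then pick the next ep0 state and data-phase direction (EP0_DPHTX)
 * from wLength and bRequestType.
 */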
static void ep0_read_setup(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
{
	struct mtu3_request *mreq;
	u32 count;
	u32 csr;

	csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
	count = mtu3_readl(mtu->mac_base, U3D_RXCOUNT0);

	ep0_read_fifo(mtu->ep0, (u8 *)setup, count);

	dev_dbg(mtu->dev, "SETUP req%02x.%02x v%04x i%04x l%04x\n",
		setup->bRequestType, setup->bRequest,
		le16_to_cpu(setup->wValue), le16_to_cpu(setup->wIndex),
		le16_to_cpu(setup->wLength));

	/* clean up any leftover transfers */
	mreq = next_ep0_request(mtu);
	if (mreq)
		ep0_req_giveback(mtu, &mreq->request);

	if (le16_to_cpu(setup->wLength) == 0) {
		;	/* no data stage, nothing to do */
	} else if (setup->bRequestType & USB_DIR_IN) {
		mtu3_writel(mtu->mac_base, U3D_EP0CSR,
			csr | EP0_SETUPPKTRDY | EP0_DPHTX);
		mtu->ep0_state = MU3D_EP0_STATE_TX;
	} else {
		mtu3_writel(mtu->mac_base, U3D_EP0CSR,
			(csr | EP0_SETUPPKTRDY) & (~EP0_DPHTX));
		mtu->ep0_state = MU3D_EP0_STATE_RX;
	}
}

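/*
 * handle a new SETUP packet: standard requests are handled here when
 * possible, everything else is forwarded to the gadget driver; a negative
 * result stalls ep0, and the STATUS stage may be deferred (delayed status).
 */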
static int ep0_handle_setup(struct mtu3 *mtu)
__releases(mtu->lock)
__acquires(mtu->lock)
{
	struct usb_ctrlrequest setup;
	struct mtu3_request *mreq;
	int handled = 0;

	ep0_read_setup(mtu, &setup);
	trace_mtu3_handle_setup(&setup);

	if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		handled = handle_standard_request(mtu, &setup);

	dev_dbg(mtu->dev, "handled %d, ep0_state: %s\n",
		handled, decode_ep0_state(mtu));

	if (handled < 0)
		goto stall;
	else if (handled > 0)
		goto finish;

	handled = forward_to_driver(mtu, &setup);
	if (handled < 0) {
stall:
		dev_dbg(mtu->dev, "%s stall (%d)\n", __func__, handled);

		ep0_stall_set(mtu->ep0, true,
			le16_to_cpu(setup.wLength) ? 0 : EP0_SETUPPKTRDY);

		return 0;
	}

finish:
	if (mtu->test_mode) {
		;	/* nothing to do */
	} else if (handled == USB_GADGET_DELAYED_STATUS) {
		mreq = next_ep0_request(mtu);
		if (mreq) {
			/* already asked us to continue delayed status */
			ep0_do_status_stage(mtu);
			ep0_req_giveback(mtu, &mreq->request);
		} else {
			/* defer the STATUS stage until ep0_queue() is called */
			mtu->delayed_status = true;
		}
	} else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
		ep0_do_status_stage(mtu);

		/* complete zlp request directly */
		mreq = next_ep0_request(mtu);
		if (mreq && !mreq->request.length)
			ep0_req_giveback(mtu, &mreq->request);
	}

	return 0;
}

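/* ep0 interrupt handler: dispatch on ep0_state after clearing SENTSTALL */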
irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_request *mreq;
	u32 int_status;
	irqreturn_t ret = IRQ_NONE;
	u32 csr;
	u32 len;

	int_status = mtu3_readl(mbase, U3D_EPISR);
	int_status &= mtu3_readl(mbase, U3D_EPIER);
	mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */

	/* only handle ep0's */
	if (!(int_status & (EP0ISR | SETUPENDISR)))
		return IRQ_NONE;

	/* abort current SETUP, and process new one */
	if (int_status & SETUPENDISR)
		mtu->ep0_state = MU3D_EP0_STATE_SETUP;

	csr = mtu3_readl(mbase, U3D_EP0CSR);

	dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr);

	/* we sent a stall.. need to clear it now.. */
	if (csr & EP0_SENTSTALL) {
		ep0_stall_set(mtu->ep0, false, 0);
		csr = mtu3_readl(mbase, U3D_EP0CSR);
		ret = IRQ_HANDLED;
	}
	dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
	mtu3_dbg_trace(mtu->dev, "ep0_state %s", decode_ep0_state(mtu));

	switch (mtu->ep0_state) {
	case MU3D_EP0_STATE_TX:
		/* irq on clearing txpktrdy */
		if ((csr & EP0_FIFOFULL) == 0) {
			ep0_tx_state(mtu);
			ret = IRQ_HANDLED;
		}
		break;
	case MU3D_EP0_STATE_RX:
		/* irq on set rxpktrdy */
		if (csr & EP0_RXPKTRDY) {
			ep0_rx_state(mtu);
			ret = IRQ_HANDLED;
		}
		break;
	case MU3D_EP0_STATE_TX_END:
		mtu3_writel(mbase, U3D_EP0CSR,
			(csr & EP0_W1C_BITS) | EP0_DATAEND);

		mreq = next_ep0_request(mtu);
		if (mreq)
			ep0_req_giveback(mtu, &mreq->request);

		mtu->ep0_state = MU3D_EP0_STATE_SETUP;
		ret = IRQ_HANDLED;
		dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
		break;
	case MU3D_EP0_STATE_SETUP:
		if (!(csr & EP0_SETUPPKTRDY))
			break;

		len = mtu3_readl(mbase, U3D_RXCOUNT0);
		if (len != 8) {
			dev_err(mtu->dev, "SETUP packet len %d != 8 ?\n", len);
			break;
		}

		ep0_handle_setup(mtu);
		ret = IRQ_HANDLED;
		break;
	default:
		/* can't happen */
		ep0_stall_set(mtu->ep0, true, 0);
		WARN_ON(1);
		break;
	}

	return ret;
}

static int mtu3_ep0_enable(struct usb_ep *ep,
			   const struct usb_endpoint_descriptor *desc)
{
	/* always enabled */
	return -EINVAL;
}

static int mtu3_ep0_disable(struct usb_ep *ep)
{
	/* always enabled */
	return -EINVAL;
}

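/*
 * ep0 accepts only one request at a time; queuing while a delayed status
 * is pending just triggers the deferred STATUS stage instead of queuing.
 */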
static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct mtu3 *mtu = mep->mtu;

	mreq->mtu = mtu;
	mreq->request.actual = 0;
	mreq->request.status = -EINPROGRESS;

	dev_dbg(mtu->dev, "%s %s (ep0_state: %s), len#%d\n", __func__,
		mep->name, decode_ep0_state(mtu), mreq->request.length);

	switch (mtu->ep0_state) {
	case MU3D_EP0_STATE_SETUP:
	case MU3D_EP0_STATE_RX:	/* control-OUT data */
	case MU3D_EP0_STATE_TX:	/* control-IN data */
		break;
	default:
		dev_err(mtu->dev, "%s, error in ep0 state %s\n", __func__,
			decode_ep0_state(mtu));
		return -EINVAL;
	}

	if (mtu->delayed_status) {
		mtu->delayed_status = false;
		ep0_do_status_stage(mtu);
		/* no need to give back the request used for the delayed STATUS */
		return 0;
	}

	if (!list_empty(&mep->req_list))
		return -EBUSY;

	list_add_tail(&mreq->list, &mep->req_list);

	/* sequence #1, IN ... start writing the data */
	if (mtu->ep0_state == MU3D_EP0_STATE_TX)
		ep0_tx_state(mtu);

	return 0;
}

static int mtu3_ep0_queue(struct usb_ep *ep,
			  struct usb_request *req, gfp_t gfp)
{
	struct mtu3_ep *mep;
	struct mtu3_request *mreq;
	struct mtu3 *mtu;
	unsigned long flags;
	int ret = 0;

	if (!ep || !req)
		return -EINVAL;

	mep = to_mtu3_ep(ep);
	mtu = mep->mtu;
	mreq = to_mtu3_request(req);

	spin_lock_irqsave(&mtu->lock, flags);
	ret = ep0_queue(mep, mreq);
	spin_unlock_irqrestore(&mtu->lock, flags);
	return ret;
}

static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	/* we just won't support this */
	return -EINVAL;
}

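/*
 * protocol stall requested by the gadget driver (e.g. from its setup()
 * callback); rejected if a request is still queued on ep0.
 */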
static int mtu3_ep0_halt(struct usb_ep *ep, int value)
{
	struct mtu3_ep *mep;
	struct mtu3 *mtu;
	unsigned long flags;
	int ret = 0;

	if (!ep || !value)
		return -EINVAL;

	mep = to_mtu3_ep(ep);
	mtu = mep->mtu;

	dev_dbg(mtu->dev, "%s\n", __func__);

	spin_lock_irqsave(&mtu->lock, flags);

	if (!list_empty(&mep->req_list)) {
		ret = -EBUSY;
		goto cleanup;
	}

	switch (mtu->ep0_state) {
	/*
	 * stalls are usually issued after parsing SETUP packet, either
	 * directly in irq context from setup() or else later.
	 */
	case MU3D_EP0_STATE_TX:
	case MU3D_EP0_STATE_TX_END:
	case MU3D_EP0_STATE_RX:
	case MU3D_EP0_STATE_SETUP:
		ep0_stall_set(mtu->ep0, true, 0);
		break;
	default:
		dev_dbg(mtu->dev, "ep0 can't halt in state %s\n",
			decode_ep0_state(mtu));
		ret = -EINVAL;
	}

cleanup:
	spin_unlock_irqrestore(&mtu->lock, flags);
	return ret;
}

const struct usb_ep_ops mtu3_ep0_ops = {
	.enable = mtu3_ep0_enable,
	.disable = mtu3_ep0_disable,
	.alloc_request = mtu3_alloc_request,
	.free_request = mtu3_free_request,
	.queue = mtu3_ep0_queue,
	.dequeue = mtu3_ep0_dequeue,
	.set_halt = mtu3_ep0_halt,
};