// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"
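
/*
 * Overview of the EP0 state machine implemented below (derived from how
 * this file drives ep->ep0.state; the state names follow the enum used
 * throughout the driver):
 *
 *   ep0_state_token  -- SETUP received -->           ep0_state_data
 *   ep0_state_data   -- last chunk sent/received --> ep0_state_status
 *   ep0_state_status -- status ack -->               ep0_state_token
 *   any state        -- error / protocol stall -->   ep0_state_stall
 *
 * ep0_state_stall returns to ep0_state_token once the stall is acked,
 * or when a new SETUP packet arrives (see ast_vhub_ep0_handle_setup()).
 */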
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
        struct usb_request *req = &ep->ep0.req.req;
        int rc;

        if (WARN_ON(ep->d_idx != 0))
                return std_req_stall;
        if (WARN_ON(!ep->ep0.dir_in))
                return std_req_stall;
        if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
                return std_req_stall;
        if (WARN_ON(req->status == -EINPROGRESS))
                return std_req_stall;

        req->buf = ptr;
        req->length = len;
        req->complete = NULL;
        req->zero = true;

        /*
         * Call internal queue directly after dropping the lock. This is
         * safe to do as the reply is always the last thing done when
         * processing a SETUP packet, usually as a tail call.
         */
        spin_unlock(&ep->vhub->lock);
        if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
                rc = std_req_stall;
        else
                rc = std_req_data;
        spin_lock(&ep->vhub->lock);
        return rc;
}

int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
        u8 *buffer = ep->buf;
        unsigned int i;
        va_list args;

        va_start(args, len);

        /* Copy data directly into EP buffer */
        for (i = 0; i < len; i++)
                buffer[i] = va_arg(args, int);
        va_end(args);

        /* req->buf NULL means data is already there */
        return ast_vhub_reply(ep, NULL, len);
}
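
/*
 * Usage sketch for the helper above (a hypothetical call site; actual
 * callers in this driver go through a variadic wrapper macro in vhub.h):
 *
 *   // Reply to a GET_STATUS request with a 2-byte status word
 *   return __ast_vhub_simple_reply(ep, 2, 0x01, 0x00);
 *
 * Each vararg is consumed as an int and truncated to one byte of the
 * EP0 buffer, so values must be listed byte by byte.
 */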

void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
        struct usb_ctrlrequest crq;
        enum std_req_rc std_req_rc;
        int rc = -ENODEV;

        if (WARN_ON(ep->d_idx != 0))
                return;

        /*
         * Grab the setup packet from the chip and byteswap
         * interesting fields
         */
        memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

        EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
              crq.bRequestType, crq.bRequest,
              le16_to_cpu(crq.wValue),
              le16_to_cpu(crq.wIndex),
              le16_to_cpu(crq.wLength),
              (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
              ep->ep0.state);

        /*
         * Check our state, cancel pending requests if needed
         *
         * Note: Under some circumstances, we can get a new setup
         * packet while waiting for the stall ack, just accept it.
         *
         * In any case, a SETUP packet in the wrong state should have
         * reset the HW state machine, so let's just log, nuke the
         * requests, and move on.
         */
        if (ep->ep0.state != ep0_state_token &&
            ep->ep0.state != ep0_state_stall) {
                EPDBG(ep, "wrong state\n");
                ast_vhub_nuke(ep, -EIO);
        }

        /* Calculate next state for EP0 */
        ep->ep0.state = ep0_state_data;
        ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

        /* If this is the vHub, we handle requests differently */
        std_req_rc = std_req_driver;
        if (ep->dev == NULL) {
                if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
                        std_req_rc = ast_vhub_std_hub_request(ep, &crq);
                else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
                        std_req_rc = ast_vhub_class_hub_request(ep, &crq);
                else
                        std_req_rc = std_req_stall;
        } else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
                std_req_rc = ast_vhub_std_dev_request(ep, &crq);

        /* Act upon result */
        switch (std_req_rc) {
        case std_req_complete:
                goto complete;
        case std_req_stall:
                goto stall;
        case std_req_driver:
                break;
        case std_req_data:
                return;
        }

        /* Pass request up to the gadget driver */
        if (WARN_ON(!ep->dev))
                goto stall;
        if (ep->dev->driver) {
                EPDBG(ep, "forwarding to gadget...\n");
                spin_unlock(&ep->vhub->lock);
                rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
                spin_lock(&ep->vhub->lock);
                EPDBG(ep, "driver returned %d\n", rc);
        } else {
                EPDBG(ep, "no gadget for request !\n");
        }
        if (rc >= 0)
                return;

 stall:
        EPDBG(ep, "stalling\n");
        writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
        ep->ep0.state = ep0_state_stall;
        ep->ep0.dir_in = false;
        return;

 complete:
        EPVDBG(ep, "sending [in] status with no data\n");
        writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
        ep->ep0.state = ep0_state_status;
        ep->ep0.dir_in = false;
}

static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
                                 struct ast_vhub_req *req)
{
        unsigned int chunk;
        u32 reg;

        /*
         * If this is a 0-length request, it's the gadget trying to
         * send a status on our behalf. We take it from here.
         */
        if (req->req.length == 0)
                req->last_desc = 1;

        /* Are we done ? Complete request, otherwise wait for next interrupt */
        if (req->last_desc >= 0) {
                EPVDBG(ep, "complete send %d/%d\n",
                       req->req.actual, req->req.length);
                ep->ep0.state = ep0_state_status;
                writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
                ast_vhub_done(ep, req, 0);
                return;
        }

        /*
         * Next chunk cropped to max packet size. Also check if this
         * is the last packet
         */
        chunk = req->req.length - req->req.actual;
        if (chunk > ep->ep.maxpacket)
                chunk = ep->ep.maxpacket;
        else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
                req->last_desc = 1;

        EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
               chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

        /*
         * Copy data if any (internal requests already have data
         * in the EP buffer)
         */
        if (chunk && req->req.buf)
                memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

        vhub_dma_workaround(ep->buf);

        /* Remember chunk size and trigger send */
        reg = VHUB_EP0_SET_TX_LEN(chunk);
        writel(reg, ep->ep0.ctlstat);
        writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
        req->req.actual += chunk;
}
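
/*
 * Note on the send path above: each chunk is armed by first writing the
 * TX length, then the same value with VHUB_EP0_TX_BUFF_RDY set. When the
 * host acks the packet, the resulting interrupt lands back in
 * ast_vhub_ep0_handle_ack(), which calls ast_vhub_ep0_do_send() again
 * for the next chunk until last_desc is set.
 */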

static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
        EPVDBG(ep, "rx prime\n");

        /* Prime endpoint for receiving data */
        writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}

static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
                                    unsigned int len)
{
        unsigned int remain;
        int rc = 0;

        /* We are receiving... grab request */
        remain = req->req.length - req->req.actual;

        EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

        /* Are we getting more than asked ? */
        if (len > remain) {
                EPDBG(ep, "receiving too much (ovf: %d) !\n",
                      len - remain);
                len = remain;
                rc = -EOVERFLOW;
        }

        /* The hardware may return a wrong data length */
        if (len < ep->ep.maxpacket && len != remain) {
                EPDBG(ep, "using expected data len instead\n");
                len = remain;
        }

        if (len && req->req.buf)
                memcpy(req->req.buf + req->req.actual, ep->buf, len);
        req->req.actual += len;

        /* Done ? */
        if (len < ep->ep.maxpacket || len == remain) {
                ep->ep0.state = ep0_state_status;
                writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
                ast_vhub_done(ep, req, rc);
        } else
                ast_vhub_ep0_rx_prime(ep);
}
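
/*
 * Completion rule for the receive path above: a short packet
 * (len < maxpacket) or reaching the expected total length ends the OUT
 * data phase, after which the IN status phase is armed via
 * VHUB_EP0_TX_BUFF_RDY. This mirrors standard USB control transfer
 * semantics.
 */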

void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
        struct ast_vhub_req *req;
        struct ast_vhub *vhub = ep->vhub;
        struct device *dev = &vhub->pdev->dev;
        bool stall = false;
        u32 stat;

        /* Read EP0 status */
        stat = readl(ep->ep0.ctlstat);

        /* Grab current request if any */
        req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

        EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
               stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

        switch (ep->ep0.state) {
        case ep0_state_token:
                /* There should be no request queued in that state... */
                if (req) {
                        dev_warn(dev, "request present while in TOKEN state\n");
                        ast_vhub_nuke(ep, -EINVAL);
                }
                dev_warn(dev, "ack while in TOKEN state\n");
                stall = true;
                break;
        case ep0_state_data:
                /* Check the state bits corresponding to our direction */
                if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
                    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
                    (ep->ep0.dir_in != in_ack)) {
                        /* In that case, ignore interrupt */
                        dev_warn(dev, "irq state mismatch\n");
                        break;
                }
                /*
                 * We are in the data phase and there's no request,
                 * something is wrong, stall
                 */
                if (!req) {
                        dev_warn(dev, "data phase, no request\n");
                        stall = true;
                        break;
                }

                /* We have a request, handle data transfers */
                if (ep->ep0.dir_in)
                        ast_vhub_ep0_do_send(ep, req);
                else
                        ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
                return;
        case ep0_state_status:
                /* Nuke stale requests */
                if (req) {
                        dev_warn(dev, "request present while in STATUS state\n");
                        ast_vhub_nuke(ep, -EINVAL);
                }

                /*
                 * If the status phase completes with the wrong ack, stall
                 * the endpoint just in case, to abort whatever the host
                 * was doing.
                 */
                if (ep->ep0.dir_in == in_ack) {
                        dev_warn(dev, "status direction mismatch\n");
                        stall = true;
                }
                break;
        case ep0_state_stall:
                /*
                 * There shouldn't be any request left, but nuke just in case,
                 * otherwise the stale request will block subsequent ones
                 */
                ast_vhub_nuke(ep, -EIO);
                break;
        }

        /* Reset to token state or stall */
        if (stall) {
                writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
                ep->ep0.state = ep0_state_stall;
        } else
                ep->ep0.state = ep0_state_token;
}

static int ast_vhub_ep0_queue(struct usb_ep *u_ep, struct usb_request *u_req,
                              gfp_t gfp_flags)
{
        struct ast_vhub_req *req = to_ast_req(u_req);
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        struct device *dev = &vhub->pdev->dev;
        unsigned long flags;

        /* Paranoid checks */
        if (!u_req || (!u_req->complete && !req->internal)) {
                dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
                if (u_req) {
                        dev_warn(dev, "complete=%p internal=%d\n",
                                 u_req->complete, req->internal);
                }
                return -EINVAL;
        }

        /* Not endpoint 0 ? */
        if (WARN_ON(ep->d_idx != 0))
                return -EINVAL;

        /* Disabled device */
        if (ep->dev && !ep->dev->enabled)
                return -ESHUTDOWN;

        /* Data, no buffer and not internal ? */
        if (u_req->length && !u_req->buf && !req->internal) {
                dev_warn(dev, "Request with no buffer !\n");
                return -EINVAL;
        }

        EPVDBG(ep, "enqueue req @%p\n", req);
        EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
               u_req->length, u_req->zero,
               u_req->short_not_ok, ep->ep0.dir_in);

        /* Initialize request progress fields */
        u_req->status = -EINPROGRESS;
        u_req->actual = 0;
        req->last_desc = -1;
        req->active = false;

        spin_lock_irqsave(&vhub->lock, flags);

        /* EP0 can only support a single request at a time */
        if (!list_empty(&ep->queue) ||
            ep->ep0.state == ep0_state_token ||
            ep->ep0.state == ep0_state_stall) {
                dev_warn(dev, "EP0: Request in wrong state\n");
                EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
                       list_empty(&ep->queue), ep->ep0.state);
                spin_unlock_irqrestore(&vhub->lock, flags);
                return -EBUSY;
        }

        /* Add request to list and kick processing if empty */
        list_add_tail(&req->queue, &ep->queue);
        if (ep->ep0.dir_in) {
                /* IN request, send data */
                ast_vhub_ep0_do_send(ep, req);
        } else if (u_req->length == 0) {
                /* 0-length request, send completion as rx */
                EPVDBG(ep, "0-length rx completion\n");
                ep->ep0.state = ep0_state_status;
                writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
                ast_vhub_done(ep, req, 0);
        } else {
                /* OUT request, start receiver */
                ast_vhub_ep0_rx_prime(ep);
        }

        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static int ast_vhub_ep0_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        struct ast_vhub_req *req;
        unsigned long flags;
        int rc = -EINVAL;

        spin_lock_irqsave(&vhub->lock, flags);

        /* Only one request can be in the queue */
        req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

        /* Is it ours ? */
        if (req && u_req == &req->req) {
                EPVDBG(ep, "dequeue req @%p\n", req);

                /*
                 * We don't have to deal with "active" as all
                 * DMAs go to the EP buffers, not the request.
                 */
                ast_vhub_done(ep, req, -ECONNRESET);

                /* We do stall the EP to clean things up in HW */
                writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
                ep->ep0.state = ep0_state_status;
                ep->ep0.dir_in = false;
                rc = 0;
        }

        spin_unlock_irqrestore(&vhub->lock, flags);
        return rc;
}

static const struct usb_ep_ops ast_vhub_ep0_ops = {
        .queue         = ast_vhub_ep0_queue,
        .dequeue       = ast_vhub_ep0_dequeue,
        .alloc_request = ast_vhub_alloc_request,
        .free_request  = ast_vhub_free_request,
};
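
/*
 * Note: this ops table has no .enable/.disable entries. EP0 is always
 * active while the controller is running and is set up directly by
 * ast_vhub_init_ep0() below rather than through usb_ep_enable().
 */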

void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
        struct ast_vhub_ep *ep = &dev->ep0;

        ast_vhub_nuke(ep, -EIO);
        ep->ep0.state = ep0_state_token;
}

void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
                       struct ast_vhub_dev *dev)
{
        memset(ep, 0, sizeof(*ep));

        INIT_LIST_HEAD(&ep->ep.ep_list);
        INIT_LIST_HEAD(&ep->queue);
        ep->ep.ops = &ast_vhub_ep0_ops;
        ep->ep.name = "ep0";
        ep->ep.caps.type_control = true;
        usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
        ep->d_idx = 0;
        ep->dev = dev;
        ep->vhub = vhub;
        ep->ep0.state = ep0_state_token;
        INIT_LIST_HEAD(&ep->ep0.req.queue);
        ep->ep0.req.internal = true;

        /* Small difference between vHub and devices */
        if (dev) {
                ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
                ep->ep0.setup = vhub->regs +
                        AST_VHUB_SETUP0 + 8 * (dev->index + 1);
                ep->buf = vhub->ep0_bufs +
                        AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
                ep->buf_dma = vhub->ep0_bufs_dma +
                        AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
        } else {
                ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
                ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
                ep->buf = vhub->ep0_bufs;
                ep->buf_dma = vhub->ep0_bufs_dma;
        }
}