stub_rx.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2003-2008 Takahiro Hirofuchi
  4. */
  5. #include <asm/byteorder.h>
  6. #include <linux/kthread.h>
  7. #include <linux/usb.h>
  8. #include <linux/usb/hcd.h>
  9. #include <linux/scatterlist.h>
  10. #include "usbip_common.h"
  11. #include "stub.h"
  12. static int is_clear_halt_cmd(struct urb *urb)
  13. {
  14. struct usb_ctrlrequest *req;
  15. req = (struct usb_ctrlrequest *) urb->setup_packet;
  16. return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
  17. (req->bRequestType == USB_RECIP_ENDPOINT) &&
  18. (req->wValue == USB_ENDPOINT_HALT);
  19. }
  20. static int is_set_interface_cmd(struct urb *urb)
  21. {
  22. struct usb_ctrlrequest *req;
  23. req = (struct usb_ctrlrequest *) urb->setup_packet;
  24. return (req->bRequest == USB_REQ_SET_INTERFACE) &&
  25. (req->bRequestType == USB_RECIP_INTERFACE);
  26. }
  27. static int is_set_configuration_cmd(struct urb *urb)
  28. {
  29. struct usb_ctrlrequest *req;
  30. req = (struct usb_ctrlrequest *) urb->setup_packet;
  31. return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
  32. (req->bRequestType == USB_RECIP_DEVICE);
  33. }
  34. static int is_reset_device_cmd(struct urb *urb)
  35. {
  36. struct usb_ctrlrequest *req;
  37. __u16 value;
  38. __u16 index;
  39. req = (struct usb_ctrlrequest *) urb->setup_packet;
  40. value = le16_to_cpu(req->wValue);
  41. index = le16_to_cpu(req->wIndex);
  42. if ((req->bRequest == USB_REQ_SET_FEATURE) &&
  43. (req->bRequestType == USB_RT_PORT) &&
  44. (value == USB_PORT_FEAT_RESET)) {
  45. usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
  46. return 1;
  47. } else
  48. return 0;
  49. }
  50. static int tweak_clear_halt_cmd(struct urb *urb)
  51. {
  52. struct usb_ctrlrequest *req;
  53. int target_endp;
  54. int target_dir;
  55. int target_pipe;
  56. int ret;
  57. req = (struct usb_ctrlrequest *) urb->setup_packet;
  58. /*
  59. * The stalled endpoint is specified in the wIndex value. The endpoint
  60. * of the urb is the target of this clear_halt request (i.e., control
  61. * endpoint).
  62. */
  63. target_endp = le16_to_cpu(req->wIndex) & 0x000f;
  64. /* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80. */
  65. target_dir = le16_to_cpu(req->wIndex) & 0x0080;
  66. if (target_dir)
  67. target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
  68. else
  69. target_pipe = usb_sndctrlpipe(urb->dev, target_endp);
  70. ret = usb_clear_halt(urb->dev, target_pipe);
  71. if (ret < 0)
  72. dev_err(&urb->dev->dev,
  73. "usb_clear_halt error: devnum %d endp %d ret %d\n",
  74. urb->dev->devnum, target_endp, ret);
  75. else
  76. dev_info(&urb->dev->dev,
  77. "usb_clear_halt done: devnum %d endp %d\n",
  78. urb->dev->devnum, target_endp);
  79. return ret;
  80. }
  81. static int tweak_set_interface_cmd(struct urb *urb)
  82. {
  83. struct usb_ctrlrequest *req;
  84. __u16 alternate;
  85. __u16 interface;
  86. int ret;
  87. req = (struct usb_ctrlrequest *) urb->setup_packet;
  88. alternate = le16_to_cpu(req->wValue);
  89. interface = le16_to_cpu(req->wIndex);
  90. usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
  91. interface, alternate);
  92. ret = usb_set_interface(urb->dev, interface, alternate);
  93. if (ret < 0)
  94. dev_err(&urb->dev->dev,
  95. "usb_set_interface error: inf %u alt %u ret %d\n",
  96. interface, alternate, ret);
  97. else
  98. dev_info(&urb->dev->dev,
  99. "usb_set_interface done: inf %u alt %u\n",
  100. interface, alternate);
  101. return ret;
  102. }
/*
 * Execute a client-issued SET_CONFIGURATION locally via
 * usb_set_configuration() so host-side usbcore state follows the change.
 *
 * Always returns 0: a configuration failure (other than -ENODEV) is only
 * logged here; it does not tear down the connection.
 */
static int tweak_set_configuration_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	struct usb_ctrlrequest *req;
	__u16 config;
	int err;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	config = le16_to_cpu(req->wValue);

	/* usb_set_configuration() requires the device lock to be held */
	usb_lock_device(sdev->udev);
	err = usb_set_configuration(sdev->udev, config);
	usb_unlock_device(sdev->udev);
	/* -ENODEV means the device is gone; no point logging that case */
	if (err && err != -ENODEV)
		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
			config, err);
	return 0;
}
/*
 * Execute a client-issued device reset locally via usb_reset_device().
 *
 * Always returns 0: if the reset lock cannot be obtained the failure is
 * only logged and the request is otherwise dropped.
 */
static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	/*
	 * usb_lock_device_for_reset() must succeed before calling
	 * usb_reset_device(); it also takes the device lock, released below.
	 */
	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}
  133. /*
  134. * clear_halt, set_interface, and set_configuration require special tricks.
  135. */
  136. static void tweak_special_requests(struct urb *urb)
  137. {
  138. if (!urb || !urb->setup_packet)
  139. return;
  140. if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
  141. return;
  142. if (is_clear_halt_cmd(urb))
  143. /* tweak clear_halt */
  144. tweak_clear_halt_cmd(urb);
  145. else if (is_set_interface_cmd(urb))
  146. /* tweak set_interface */
  147. tweak_set_interface_cmd(urb);
  148. else if (is_set_configuration_cmd(urb))
  149. /* tweak set_configuration */
  150. tweak_set_configuration_cmd(urb);
  151. else if (is_reset_device_cmd(urb))
  152. tweak_reset_device_cmd(urb);
  153. else
  154. usbip_dbg_stub_rx("no need to tweak\n");
  155. }
/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process coming urbs. Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 *
 * Always returns 0.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret, i;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* search the not-yet-completed (priv_init) queue for the target urb */
	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		/*
		 * This matched urb is not completed yet (i.e., be in
		 * flight in usb hcd hardware/driver). Now we are
		 * cancelling it. The unlinking flag means that we are
		 * now not going to return the normal result pdu of a
		 * submission request, but going to return a result pdu
		 * of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * In the case that unlinking flag is on, prev->seqnum
		 * is changed from the seqnum of the cancelling urb to
		 * the seqnum of the unlink request. This will be used
		 * to make the result pdu of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is now out of spinlocking to avoid
		 * spinlock recursion since stub_complete() is
		 * sometimes called in this context but not in the
		 * interrupt context. If stub_complete() is executed
		 * before we call usb_unlink_urb(), usb_unlink_urb()
		 * will return an error value. In this case, stub_tx
		 * will return the result pdu of this unlink request
		 * though submission is completed and actual unlinking
		 * is not executed. OK?
		 */
		/* In the above case, urb->status is not -ECONNRESET,
		 * so a driver in a client host will know the failure
		 * of the unlink request ?
		 */
		/* unlink every urb not yet completed for this request */
		for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
			ret = usb_unlink_urb(priv->urbs[i]);
			if (ret != -EINPROGRESS)
				dev_err(&priv->urbs[i]->dev->dev,
					"failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
					i + 1, priv->num_urbs,
					priv->seqnum, ret);
		}
		/* lock already dropped above; return without re-taking it */
		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in priv_init queue. It was
	 * already completed and its results is/was going to be sent by a
	 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
	 * return the completeness of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
  228. static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
  229. {
  230. struct usbip_device *ud = &sdev->ud;
  231. int valid = 0;
  232. if (pdu->base.devid == sdev->devid) {
  233. spin_lock_irq(&ud->lock);
  234. if (ud->status == SDEV_ST_USED) {
  235. /* A request is valid. */
  236. valid = 1;
  237. }
  238. spin_unlock_irq(&ud->lock);
  239. }
  240. return valid;
  241. }
/*
 * Allocate a stub_priv for the incoming pdu and queue it on the
 * priv_init list.
 *
 * Returns the new stub_priv, or NULL on allocation failure (in which case
 * an SDEV_EVENT_ERROR_MALLOC event is raised to shut the connection down).
 */
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	unsigned long flags;

	/* allocation happens under priv_lock, hence GFP_ATOMIC below */
	spin_lock_irqsave(&sdev->priv_lock, flags);

	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv) {
		dev_err(&sdev->udev->dev, "alloc stub_priv\n");
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return NULL;
	}

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * After a stub_priv is linked to a list_head,
	 * our error handler can free allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return priv;
}
  266. static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
  267. {
  268. struct usb_device *udev = sdev->udev;
  269. struct usb_host_endpoint *ep;
  270. struct usb_endpoint_descriptor *epd = NULL;
  271. int epnum = pdu->base.ep;
  272. int dir = pdu->base.direction;
  273. if (epnum < 0 || epnum > 15)
  274. goto err_ret;
  275. if (dir == USBIP_DIR_IN)
  276. ep = udev->ep_in[epnum & 0x7f];
  277. else
  278. ep = udev->ep_out[epnum & 0x7f];
  279. if (!ep)
  280. goto err_ret;
  281. epd = &ep->desc;
  282. if (usb_endpoint_xfer_control(epd)) {
  283. if (dir == USBIP_DIR_OUT)
  284. return usb_sndctrlpipe(udev, epnum);
  285. else
  286. return usb_rcvctrlpipe(udev, epnum);
  287. }
  288. if (usb_endpoint_xfer_bulk(epd)) {
  289. if (dir == USBIP_DIR_OUT)
  290. return usb_sndbulkpipe(udev, epnum);
  291. else
  292. return usb_rcvbulkpipe(udev, epnum);
  293. }
  294. if (usb_endpoint_xfer_int(epd)) {
  295. if (dir == USBIP_DIR_OUT)
  296. return usb_sndintpipe(udev, epnum);
  297. else
  298. return usb_rcvintpipe(udev, epnum);
  299. }
  300. if (usb_endpoint_xfer_isoc(epd)) {
  301. /* validate number of packets */
  302. if (pdu->u.cmd_submit.number_of_packets < 0 ||
  303. pdu->u.cmd_submit.number_of_packets >
  304. USBIP_MAX_ISO_PACKETS) {
  305. dev_err(&sdev->udev->dev,
  306. "CMD_SUBMIT: isoc invalid num packets %d\n",
  307. pdu->u.cmd_submit.number_of_packets);
  308. return -1;
  309. }
  310. if (dir == USBIP_DIR_OUT)
  311. return usb_sndisocpipe(udev, epnum);
  312. else
  313. return usb_rcvisocpipe(udev, epnum);
  314. }
  315. err_ret:
  316. /* NOT REACHED */
  317. dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
  318. return -1;
  319. }
/*
 * Strip transfer_flags the client has no business setting, keeping only a
 * simple/standard whitelist per endpoint transfer type.
 *
 * NOTE(review): the structure appears to mirror the flag-sanitizing checks
 * in usbcore's usb_submit_urb() — confirm against the kernel version this
 * tracks if the policy ever changes there.
 */
static void masking_bogus_flags(struct urb *urb)
{
	int xfertype;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	/* nothing to sanitize without a valid, unsubmitted, completable urb */
	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	/* look up the target endpoint from the pipe's direction and number */
	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		/* direction of the data stage; zero wLength counts as OUT */
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		fallthrough;
	default: /* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	/* drop every flag not in the whitelist */
	urb->transfer_flags &= allowed;
}
  365. static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
  366. {
  367. int ret;
  368. int i;
  369. for (i = 0; i < priv->num_urbs; i++) {
  370. ret = usbip_recv_xbuff(ud, priv->urbs[i]);
  371. if (ret < 0)
  372. break;
  373. }
  374. return ret;
  375. }
/*
 * Handle a CMD_SUBMIT pdu: allocate a stub_priv and one or more urbs,
 * receive the transfer buffer(s) from the socket, and submit the urbs to
 * the real device.
 *
 * On allocation/receive errors an SDEV event is raised; the stub_priv was
 * already queued on priv_init by stub_priv_alloc(), so the event handler
 * is responsible for releasing it and its urbs (see the comments on the
 * error labels below).
 */
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	struct scatterlist *sgl = NULL, *sg;
	void *buffer = NULL;
	unsigned long long buf_len;
	int nents;
	int num_urbs = 1;
	int pipe = get_pipe(sdev, pdu);
	int use_sg = pdu->u.cmd_submit.transfer_flags & USBIP_URB_DMA_MAP_SG;
	int support_sg = 1;
	int np = 0;
	int ret, i;

	/* get_pipe() already logged the invalid endpoint */
	if (pipe == -1)
		return;

	/*
	 * Smatch reported the error case where use_sg is true and buf_len is 0.
	 * In this case, It adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be
	 * released by stub event handler and connection will be shut down.
	 */
	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;

	if (use_sg && !buf_len) {
		dev_err(&udev->dev, "sg buffer with zero length\n");
		goto err_malloc;
	}

	/* allocate urb transfer buffer, if needed */
	if (buf_len) {
		if (use_sg) {
			sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
			if (!sgl)
				goto err_malloc;

			/* Check if the server's HCD supports SG */
			if (!udev->bus->sg_tablesize) {
				/*
				 * If the server's HCD doesn't support SG, break
				 * a single SG request into several URBs and map
				 * each SG list entry to corresponding URB
				 * buffer. The previously allocated SG list is
				 * stored in priv->sgl (If the server's HCD
				 * support SG, SG list is stored only in
				 * urb->sg) and it is used as an indicator that
				 * the server split single SG request into
				 * several URBs. Later, priv->sgl is used by
				 * stub_complete() and stub_send_ret_submit() to
				 * reassemble the divied URBs.
				 */
				support_sg = 0;
				num_urbs = nents;
				priv->completed_urbs = 0;
				pdu->u.cmd_submit.transfer_flags &=
					~USBIP_URB_DMA_MAP_SG;
			}
		} else {
			buffer = kzalloc(buf_len, GFP_KERNEL);
			if (!buffer)
				goto err_malloc;
		}
	}

	/* allocate urb array */
	priv->num_urbs = num_urbs;
	priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
	if (!priv->urbs)
		goto err_urbs;

	/* setup a urb */
	if (support_sg) {
		/* one urb carries the whole request */
		if (usb_pipeisoc(pipe))
			np = pdu->u.cmd_submit.number_of_packets;

		priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
		if (!priv->urbs[0])
			goto err_urb;

		if (buf_len) {
			if (use_sg) {
				priv->urbs[0]->sg = sgl;
				priv->urbs[0]->num_sgs = nents;
				priv->urbs[0]->transfer_buffer = NULL;
			} else {
				priv->urbs[0]->transfer_buffer = buffer;
			}
		}

		/* copy urb setup packet */
		priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
					8, GFP_KERNEL);
		if (!priv->urbs[0]->setup_packet) {
			/*
			 * the queued stub_priv (and its urb) is cleaned up by
			 * the event handler triggered here
			 */
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}

		usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
	} else {
		/* split the SG request: one urb per SG list entry */
		for_each_sg(sgl, sg, nents, i) {
			priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
			/* The URBs which is previously allocated will be freed
			 * in stub_device_cleanup_urbs() if error occurs.
			 */
			if (!priv->urbs[i])
				goto err_urb;

			usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
			priv->urbs[i]->transfer_buffer = sg_virt(sg);
			priv->urbs[i]->transfer_buffer_length = sg->length;
		}
		priv->sgl = sgl;
	}

	for (i = 0; i < num_urbs; i++) {
		/* set other members from the base header of pdu */
		priv->urbs[i]->context = (void *) priv;
		priv->urbs[i]->dev = udev;
		priv->urbs[i]->pipe = pipe;
		priv->urbs[i]->complete = stub_complete;

		/* no need to submit an intercepted request, but harmless? */
		tweak_special_requests(priv->urbs[i]);

		masking_bogus_flags(priv->urbs[i]);
	}

	/* receive failures raise their own event inside these helpers */
	if (stub_recv_xbuff(ud, priv) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
		return;

	/* urb is now ready to submit */
	for (i = 0; i < priv->num_urbs; i++) {
		ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);

		if (ret == 0)
			usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
					  pdu->base.seqnum);
		else {
			dev_err(&udev->dev, "submit_urb error, %d\n", ret);
			usbip_dump_header(pdu);
			usbip_dump_urb(priv->urbs[i]);

			/*
			 * Pessimistic.
			 * This connection will be discarded.
			 */
			usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
			break;
		}
	}

	usbip_dbg_stub_rx("Leave\n");
	return;

err_urb:
	kfree(priv->urbs);
err_urbs:
	kfree(buffer);
	sgl_free(sgl);
err_malloc:
	/* the queued stub_priv itself is freed by the event handler */
	usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
}
  525. /* recv a pdu */
  526. static void stub_rx_pdu(struct usbip_device *ud)
  527. {
  528. int ret;
  529. struct usbip_header pdu;
  530. struct stub_device *sdev = container_of(ud, struct stub_device, ud);
  531. struct device *dev = &sdev->udev->dev;
  532. usbip_dbg_stub_rx("Enter\n");
  533. memset(&pdu, 0, sizeof(pdu));
  534. /* receive a pdu header */
  535. ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
  536. if (ret != sizeof(pdu)) {
  537. dev_err(dev, "recv a header, %d\n", ret);
  538. usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
  539. return;
  540. }
  541. usbip_header_correct_endian(&pdu, 0);
  542. if (usbip_dbg_flag_stub_rx)
  543. usbip_dump_header(&pdu);
  544. if (!valid_request(sdev, &pdu)) {
  545. dev_err(dev, "recv invalid request\n");
  546. usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
  547. return;
  548. }
  549. switch (pdu.base.command) {
  550. case USBIP_CMD_UNLINK:
  551. stub_recv_cmd_unlink(sdev, &pdu);
  552. break;
  553. case USBIP_CMD_SUBMIT:
  554. stub_recv_cmd_submit(sdev, &pdu);
  555. break;
  556. default:
  557. /* NOTREACHED */
  558. dev_err(dev, "unknown pdu\n");
  559. usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
  560. break;
  561. }
  562. }
  563. int stub_rx_loop(void *data)
  564. {
  565. struct usbip_device *ud = data;
  566. while (!kthread_should_stop()) {
  567. if (usbip_event_happened(ud))
  568. break;
  569. stub_rx_pdu(ud);
  570. }
  571. return 0;
  572. }