// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <[email protected]>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/idr.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

static struct tty_driver *dbc_tty_driver;
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);

static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
        return dbc->priv;
}
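
/*
 * Copy up to @size bytes from the port's write FIFO into @packet and
 * return how many bytes were actually copied.
 */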
static unsigned int
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
{
        unsigned int len;

        len = kfifo_len(&port->write_fifo);
        if (len < size)
                size = len;
        if (size != 0)
                size = kfifo_out(&port->write_fifo, packet, size);

        return size;
}
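
/*
 * Drain the write FIFO into free requests from the write pool and queue
 * them to the DbC endpoint. The port lock is dropped around dbc_ep_queue()
 * and re-taken afterwards; the tty is woken up if any data was consumed.
 */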
static int dbc_start_tx(struct dbc_port *port)
        __releases(&port->port_lock)
        __acquires(&port->port_lock)
{
        int len;
        struct dbc_request *req;
        int status = 0;
        bool do_tty_wake = false;
        struct list_head *pool = &port->write_pool;

        while (!list_empty(pool)) {
                req = list_entry(pool->next, struct dbc_request, list_pool);
                len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
                if (len == 0)
                        break;
                do_tty_wake = true;

                req->length = len;
                list_del(&req->list_pool);

                spin_unlock(&port->port_lock);
                status = dbc_ep_queue(req);
                spin_lock(&port->port_lock);

                if (status) {
                        list_add(&req->list_pool, pool);
                        break;
                }
        }

        if (do_tty_wake && port->port.tty)
                tty_wakeup(port->port.tty);

        return status;
}
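
/*
 * Queue all idle requests from the read pool back to the DbC endpoint so
 * more data can be received. Stops early if the tty has gone away or if
 * queueing a request fails.
 */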
static void dbc_start_rx(struct dbc_port *port)
        __releases(&port->port_lock)
        __acquires(&port->port_lock)
{
        struct dbc_request *req;
        int status;
        struct list_head *pool = &port->read_pool;

        while (!list_empty(pool)) {
                if (!port->port.tty)
                        break;

                req = list_entry(pool->next, struct dbc_request, list_pool);
                list_del(&req->list_pool);
                req->length = DBC_MAX_PACKET;

                spin_unlock(&port->port_lock);
                status = dbc_ep_queue(req);
                spin_lock(&port->port_lock);

                if (status) {
                        list_add(&req->list_pool, pool);
                        break;
                }
        }
}
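
/*
 * Request completion callbacks invoked by the DbC core. Completed reads are
 * parked on the read queue and handed to the RX tasklet; completed writes go
 * back to the write pool and, on success, kick another round of TX.
 */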
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
        unsigned long flags;
        struct dbc_port *port = dbc_to_port(dbc);

        spin_lock_irqsave(&port->port_lock, flags);
        list_add_tail(&req->list_pool, &port->read_queue);
        tasklet_schedule(&port->push);
        spin_unlock_irqrestore(&port->port_lock, flags);
}

static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
        unsigned long flags;
        struct dbc_port *port = dbc_to_port(dbc);

        spin_lock_irqsave(&port->port_lock, flags);
        list_add(&req->list_pool, &port->write_pool);
        switch (req->status) {
        case 0:
                dbc_start_tx(port);
                break;
        case -ESHUTDOWN:
                break;
        default:
                dev_warn(dbc->dev, "unexpected write complete status %d\n",
                         req->status);
                break;
        }
        spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_request *req)
{
        kfree(req->buf);
        dbc_free_request(req);
}
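
/*
 * Preallocate DBC_QUEUE_SIZE requests for one transfer direction, each with
 * a DBC_MAX_PACKET buffer, and add them to @head. Returns -ENOMEM only if
 * not a single request could be allocated.
 */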
static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
                        struct list_head *head,
                        void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
        int i;
        struct dbc_request *req;

        for (i = 0; i < DBC_QUEUE_SIZE; i++) {
                req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
                if (!req)
                        break;

                req->length = DBC_MAX_PACKET;
                req->buf = kmalloc(req->length, GFP_KERNEL);
                if (!req->buf) {
                        dbc_free_request(req);
                        break;
                }

                req->complete = fn;
                list_add_tail(&req->list_pool, head);
        }

        return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct list_head *head)
{
        struct dbc_request *req;

        while (!list_empty(head)) {
                req = list_entry(head->next, struct dbc_request, list_pool);
                list_del(&req->list_pool);
                xhci_dbc_free_req(req);
        }
}

static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
        struct dbc_port *port;

        mutex_lock(&dbc_tty_minors_lock);
        port = idr_find(&dbc_tty_minors, tty->index);
        mutex_unlock(&dbc_tty_minors_lock);

        if (!port)
                return -ENXIO;

        tty->driver_data = port;

        return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
        struct dbc_port *port = tty->driver_data;

        return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
        struct dbc_port *port = tty->driver_data;

        tty_port_close(&port->port, tty, file);
}
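
/*
 * Buffer user data in the write FIFO and start transmitting. Returns how
 * many bytes the FIFO accepted, which may be less than @count.
 */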
static int dbc_tty_write(struct tty_struct *tty,
                         const unsigned char *buf,
                         int count)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;

        spin_lock_irqsave(&port->port_lock, flags);
        if (count)
                count = kfifo_in(&port->write_fifo, buf, count);
        dbc_start_tx(port);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return count;
}

static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;
        int status;

        spin_lock_irqsave(&port->port_lock, flags);
        status = kfifo_put(&port->write_fifo, ch);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;

        spin_lock_irqsave(&port->port_lock, flags);
        dbc_start_tx(port);
        spin_unlock_irqrestore(&port->port_lock, flags);
}

static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;
        unsigned int room;

        spin_lock_irqsave(&port->port_lock, flags);
        room = kfifo_avail(&port->write_fifo);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return room;
}

static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;
        unsigned int chars;

        spin_lock_irqsave(&port->port_lock, flags);
        chars = kfifo_len(&port->write_fifo);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;

        spin_lock_irqsave(&port->port_lock, flags);
        tasklet_schedule(&port->push);
        spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
        .install                = dbc_tty_install,
        .open                   = dbc_tty_open,
        .close                  = dbc_tty_close,
        .write                  = dbc_tty_write,
        .put_char               = dbc_tty_put_char,
        .flush_chars            = dbc_tty_flush_chars,
        .write_room             = dbc_tty_write_room,
        .chars_in_buffer        = dbc_tty_chars_in_buffer,
        .unthrottle             = dbc_tty_unthrottle,
};
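
/*
 * RX tasklet: push completed read data into the tty flip buffer. If the tty
 * is throttled or the flip buffer fills up, the remainder stays on the read
 * queue and port->n_read remembers how much of the current request has
 * already been delivered. Fully consumed requests are moved back to the
 * read pool and requeued unless the device has disconnected.
 */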
static void dbc_rx_push(struct tasklet_struct *t)
{
        struct dbc_request *req;
        struct tty_struct *tty;
        unsigned long flags;
        bool do_push = false;
        bool disconnect = false;
        struct dbc_port *port = from_tasklet(port, t, push);
        struct list_head *queue = &port->read_queue;

        spin_lock_irqsave(&port->port_lock, flags);
        tty = port->port.tty;
        while (!list_empty(queue)) {
                req = list_first_entry(queue, struct dbc_request, list_pool);

                if (tty && tty_throttled(tty))
                        break;

                switch (req->status) {
                case 0:
                        break;
                case -ESHUTDOWN:
                        disconnect = true;
                        break;
                default:
                        pr_warn("ttyDBC0: unexpected RX status %d\n",
                                req->status);
                        break;
                }

                if (req->actual) {
                        char *packet = req->buf;
                        unsigned int n, size = req->actual;
                        int count;

                        n = port->n_read;
                        if (n) {
                                packet += n;
                                size -= n;
                        }

                        count = tty_insert_flip_string(&port->port, packet,
                                                       size);
                        if (count)
                                do_push = true;
                        if (count != size) {
                                port->n_read += count;
                                break;
                        }
                        port->n_read = 0;
                }

                list_move(&req->list_pool, &port->read_pool);
        }

        if (do_push)
                tty_flip_buffer_push(&port->port);

        if (!list_empty(queue) && tty) {
                if (!tty_throttled(tty)) {
                        if (do_push)
                                tasklet_schedule(&port->push);
                        else
                                pr_warn("ttyDBC0: RX not scheduled?\n");
                }
        }

        if (!disconnect)
                dbc_start_rx(port);

        spin_unlock_irqrestore(&port->port_lock, flags);
}
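
/* tty_port activate callback: start receiving as soon as the port is opened. */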
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
        unsigned long flags;
        struct dbc_port *port = container_of(_port, struct dbc_port, port);

        spin_lock_irqsave(&port->port_lock, flags);
        dbc_start_rx(port);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return 0;
}

static const struct tty_port_operations dbc_port_ops = {
        .activate = dbc_port_activate,
};

static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
        tty_port_init(&port->port);
        spin_lock_init(&port->port_lock);
        tasklet_setup(&port->push, dbc_rx_push);
        INIT_LIST_HEAD(&port->read_pool);
        INIT_LIST_HEAD(&port->read_queue);
        INIT_LIST_HEAD(&port->write_pool);

        port->port.ops = &dbc_port_ops;
        port->n_read = 0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
        tasklet_kill(&port->push);
        tty_port_destroy(&port->port);
}
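
/*
 * .configure hook of dbc_driver, called by the DbC core once a debug
 * connection is configured: allocate a tty minor, the write FIFO and both
 * request pools, then register the ttyDBC device. Undone in reverse order
 * on error.
 */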
static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
        int ret;
        struct device *tty_dev;
        struct dbc_port *port = dbc_to_port(dbc);

        if (port->registered)
                return -EBUSY;

        xhci_dbc_tty_init_port(dbc, port);

        mutex_lock(&dbc_tty_minors_lock);
        port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
        mutex_unlock(&dbc_tty_minors_lock);

        if (port->minor < 0) {
                ret = port->minor;
                goto err_idr;
        }

        ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
        if (ret)
                goto err_exit_port;

        ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
                                      dbc_read_complete);
        if (ret)
                goto err_free_fifo;

        ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
                                      dbc_write_complete);
        if (ret)
                goto err_free_requests;

        tty_dev = tty_port_register_device(&port->port,
                                           dbc_tty_driver, port->minor, NULL);
        if (IS_ERR(tty_dev)) {
                ret = PTR_ERR(tty_dev);
                goto err_free_requests;
        }

        port->registered = true;

        return 0;

err_free_requests:
        xhci_dbc_free_requests(&port->read_pool);
        xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
        kfifo_free(&port->write_fifo);
err_exit_port:
        idr_remove(&dbc_tty_minors, port->minor);
err_idr:
        xhci_dbc_tty_exit_port(port);

        dev_err(dbc->dev, "can't register tty port, err %d\n", ret);

        return ret;
}
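
/*
 * .disconnect hook of dbc_driver: tear down the tty device and release the
 * minor number, the write FIFO and all outstanding requests.
 */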
static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
        struct dbc_port *port = dbc_to_port(dbc);

        if (!port->registered)
                return;
        tty_unregister_device(dbc_tty_driver, port->minor);
        xhci_dbc_tty_exit_port(port);
        port->registered = false;

        mutex_lock(&dbc_tty_minors_lock);
        idr_remove(&dbc_tty_minors, port->minor);
        mutex_unlock(&dbc_tty_minors_lock);

        kfifo_free(&port->write_fifo);
        xhci_dbc_free_requests(&port->read_pool);
        xhci_dbc_free_requests(&port->read_queue);
        xhci_dbc_free_requests(&port->write_pool);
}

static const struct dbc_driver dbc_driver = {
        .configure              = xhci_dbc_tty_register_device,
        .disconnect             = xhci_dbc_tty_unregister_device,
};
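
/*
 * Called from the xHCI probe path: allocate the tty port glue and the DbC
 * instance, and wire them together via dbc->priv.
 */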
int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc;
        struct dbc_port *port;
        int status;

        if (!dbc_tty_driver)
                return -ENODEV;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
        if (!dbc) {
                status = -ENOMEM;
                goto out2;
        }

        dbc->priv = port;

        /* get rid of xhci once this is a real driver binding to a device */
        xhci->dbc = dbc;

        return 0;

out2:
        kfree(port);

        return status;
}

/*
 * Undo what probe did; assume the DbC is already stopped and that
 * tty_unregister_device() has been called before this.
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
        struct dbc_port *port = dbc_to_port(dbc);

        xhci_dbc_remove(dbc);
        kfree(port);
}
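
/*
 * Module-level setup: allocate and register the "dbc_serial" tty driver
 * (up to 64 ttyDBC minors) with default 9600 8N1 termios. dbc_tty_exit()
 * undoes this.
 */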
int dbc_tty_init(void)
{
        int ret;

        idr_init(&dbc_tty_minors);

        dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
                                          TTY_DRIVER_DYNAMIC_DEV);
        if (IS_ERR(dbc_tty_driver)) {
                idr_destroy(&dbc_tty_minors);
                return PTR_ERR(dbc_tty_driver);
        }

        dbc_tty_driver->driver_name = "dbc_serial";
        dbc_tty_driver->name = "ttyDBC";
        dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
        dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
        dbc_tty_driver->init_termios = tty_std_termios;
        dbc_tty_driver->init_termios.c_cflag =
                        B9600 | CS8 | CREAD | HUPCL | CLOCAL;
        dbc_tty_driver->init_termios.c_ispeed = 9600;
        dbc_tty_driver->init_termios.c_ospeed = 9600;

        tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

        ret = tty_register_driver(dbc_tty_driver);
        if (ret) {
                pr_err("Can't register dbc tty driver\n");
                tty_driver_kref_put(dbc_tty_driver);
                idr_destroy(&dbc_tty_minors);
        }

        return ret;
}

void dbc_tty_exit(void)
{
        if (dbc_tty_driver) {
                tty_unregister_driver(dbc_tty_driver);
                tty_driver_kref_put(dbc_tty_driver);
                dbc_tty_driver = NULL;
        }

        idr_destroy(&dbc_tty_minors);
}