tty_buffer.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include "tty.h"

#define MIN_TTYB_SIZE	256
#define TTYB_ALIGN_MASK	255

/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
#define TTYB_DEFAULT_MEM_LIMIT	(640 * 1024UL)

/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_find for the allocation
 * logic this must match.
 */
#define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)

/**
 * tty_buffer_lock_exclusive - gain exclusive access to buffer
 * @port: tty port owning the flip buffer
 *
 * Guarantees safe use of the &tty_ldisc_ops.receive_buf() method by excluding
 * the buffer work and any pending flush from using the flip buffer. Data can
 * continue to be added concurrently to the flip buffer from the driver side.
 *
 * See also tty_buffer_unlock_exclusive().
 */
void tty_buffer_lock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	atomic_inc(&buf->priority);
	mutex_lock(&buf->lock);
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);

/**
 * tty_buffer_unlock_exclusive - release exclusive access
 * @port: tty port owning the flip buffer
 *
 * The buffer work is restarted if there is data in the flip buffer.
 *
 * See also tty_buffer_lock_exclusive().
 */
void tty_buffer_unlock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	int restart;

	restart = buf->head->commit != buf->head->read;

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);

	if (restart)
		queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
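
/*
 * Illustrative sketch (not part of the original file): a consumer that wants
 * to feed the line discipline directly, as the selection-paste path does,
 * would bracket that call with the exclusive lock so the flip-buffer worker
 * cannot run the ldisc concurrently. The wrapper function below is
 * hypothetical and the error handling is simplified:
 *
 *	void example_direct_receive(struct tty_struct *tty,
 *				    const unsigned char *p, size_t n)
 *	{
 *		struct tty_ldisc *ld = tty_ldisc_ref_wait(tty);
 *
 *		if (!ld)
 *			return;
 *		tty_buffer_lock_exclusive(tty->port);
 *		tty_ldisc_receive_buf(ld, p, NULL, n);
 *		tty_buffer_unlock_exclusive(tty->port);
 *		tty_ldisc_deref(ld);
 *	}
 */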

/**
 * tty_buffer_space_avail - return unused buffer space
 * @port: tty port owning the flip buffer
 *
 * Returns: the # of bytes which can be written by the driver without reaching
 * the buffer limit.
 *
 * Note: this does not guarantee that memory is available to write the returned
 * # of bytes (use tty_prepare_flip_string() to pre-allocate if memory
 * guarantee is required).
 */
unsigned int tty_buffer_space_avail(struct tty_port *port)
{
	int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);

	return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
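
/*
 * Illustrative sketch (not part of the original file): a driver's RX path can
 * use tty_buffer_space_avail() to cap how much it pulls from its hardware FIFO
 * so the flip-buffer memory limit is respected. read_hw_fifo() and rx_buf are
 * hypothetical driver details:
 *
 *	static void example_rx(struct tty_port *port)
 *	{
 *		unsigned char rx_buf[64];
 *		unsigned int room = tty_buffer_space_avail(port);
 *		size_t n = read_hw_fifo(rx_buf, min_t(size_t, sizeof(rx_buf), room));
 *
 *		if (n) {
 *			tty_insert_flip_string(port, rx_buf, n);
 *			tty_flip_buffer_push(port);
 *		}
 *	}
 */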

static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->lookahead = 0;
	p->read = 0;
	p->flags = 0;
}

/**
 * tty_buffer_free_all - free buffers used by a tty
 * @port: tty port to free from
 *
 * Remove all the buffers pending on a tty whether queued with data or in the
 * free ring. Must be called when the tty is no longer in use.
 */
void tty_buffer_free_all(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *p, *next;
	struct llist_node *llist;
	unsigned int freed = 0;
	int still_used;

	while ((p = buf->head) != NULL) {
		buf->head = p->next;
		freed += p->size;
		if (p->size > 0)
			kfree(p);
	}
	llist = llist_del_all(&buf->free);
	llist_for_each_entry_safe(p, next, llist, free)
		kfree(p);

	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;

	still_used = atomic_xchg(&buf->mem_used, 0);
	WARN(still_used != freed, "we still have not freed %d bytes!",
	     still_used - freed);
}

/**
 * tty_buffer_alloc - allocate a tty buffer
 * @port: tty port
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters. We
 * round our buffers off in 256 character chunks to get better allocation
 * behaviour.
 *
 * Returns: %NULL if out of memory or the allocation would exceed the per
 * device queue.
 */
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
	struct llist_node *free;
	struct tty_buffer *p;

	/* Round the buffer size out */
	size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

	if (size <= MIN_TTYB_SIZE) {
		free = llist_del_first(&port->buf.free);
		if (free) {
			p = llist_entry(free, struct tty_buffer, free);
			goto found;
		}
	}

	/* Should possibly check if this fails for the largest buffer we
	 * have queued and recycle that ?
	 */
	if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
		return NULL;
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size,
		    GFP_ATOMIC | __GFP_NOWARN);
	if (p == NULL)
		return NULL;

found:
	tty_buffer_reset(p, size);
	atomic_add(size, &port->buf.mem_used);
	return p;
}

/**
 * tty_buffer_free - free a tty buffer
 * @port: tty port owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our internal
 * strategy.
 */
static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
	struct tty_bufhead *buf = &port->buf;

	/* Dumb strategy for now - should keep some stats */
	WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);

	if (b->size > MIN_TTYB_SIZE)
		kfree(b);
	else if (b->size > 0)
		llist_add(&b->free, &buf->free);
}

/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 * @ld: optional ldisc ptr (must be referenced)
 *
 * Flush all the buffers containing receive data. If @ld != %NULL, flush the
 * ldisc input buffer.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
 */
void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
	struct tty_port *port = tty->port;
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *next;

	atomic_inc(&buf->priority);

	mutex_lock(&buf->lock);
	/* paired w/ release in __tty_buffer_request_room; ensures there are
	 * no pending memory accesses to the freed buffer
	 */
	while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
		tty_buffer_free(port, buf->head);
		buf->head = next;
	}
	buf->head->read = buf->head->commit;
	buf->head->lookahead = buf->head->read;

	if (ld && ld->ops->flush_buffer)
		ld->ops->flush_buffer(tty);

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
}

/**
 * __tty_buffer_request_room - grow tty buffer if needed
 * @port: tty port
 * @size: size desired
 * @flags: buffer flags if new buffer allocated (default = 0)
 *
 * Make at least @size bytes of linear space available for the tty buffer.
 *
 * Will change over to a new buffer if the current buffer is encoded as
 * %TTY_NORMAL (so has no flags buffer) and the new buffer requires a flags
 * buffer.
 *
 * Returns: the size we managed to find.
 */
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
				     int flags)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *b, *n;
	int left, change;

	b = buf->tail;
	if (b->flags & TTYB_NORMAL)
		left = 2 * b->size - b->used;
	else
		left = b->size - b->used;

	change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
	if (change || left < size) {
		/* This is the slow path - looking for new buffers to use */
		n = tty_buffer_alloc(port, size);
		if (n != NULL) {
			n->flags = flags;
			buf->tail = n;
			/*
			 * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs();
			 * ensures they see all buffer data.
			 */
			smp_store_release(&b->commit, b->used);
			/*
			 * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs();
			 * ensures the latest commit value can be read before the head
			 * is advanced to the next buffer.
			 */
			smp_store_release(&b->next, n);
		} else if (change)
			size = 0;
		else
			size = left;
	}
	return size;
}

int tty_buffer_request_room(struct tty_port *port, size_t size)
{
	return __tty_buffer_request_room(port, size, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);

/**
 * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters passed are
 * marked with the supplied flag.
 *
 * Returns: the number added.
 */
int tty_insert_flip_string_fixed_flag(struct tty_port *port,
				      const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;

	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
		int space = __tty_buffer_request_room(port, goal, flags);
		struct tty_buffer *tb = port->buf.tail;

		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), flag, space);
		tb->used += space;
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		 * several buffers. If this is the case we must loop.
		 */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
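
/*
 * Illustrative sketch (not part of the original file): a serial driver that
 * detects a line error can queue the affected bytes with the matching flag and
 * then push. TTY_FRAME is the real flag constant; the helper itself and its
 * callers are hypothetical:
 *
 *	static void example_queue_rx(struct tty_port *port,
 *				     const unsigned char *data, size_t len,
 *				     bool bad_frame)
 *	{
 *		char flag = bad_frame ? TTY_FRAME : TTY_NORMAL;
 *
 *		tty_insert_flip_string_fixed_flag(port, data, flag, len);
 *		tty_flip_buffer_push(port);
 *	}
 */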

/**
 * tty_insert_flip_string_flags - add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character the flags
 * array indicates the status of the character.
 *
 * Returns: the number added.
 */
int tty_insert_flip_string_flags(struct tty_port *port,
				 const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;

	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space = tty_buffer_request_room(port, goal);
		struct tty_buffer *tb = port->buf.tail;

		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		memcpy(flag_buf_ptr(tb, tb->used), flags, space);
		tb->used += space;
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		 * several buffers. If this is the case we must loop.
		 */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

/**
 * __tty_insert_flip_char - add one character to the tty buffer
 * @port: tty port
 * @ch: character
 * @flag: flag byte
 *
 * Queue a single byte @ch to the tty buffering, with an optional flag. This is
 * the slow path of tty_insert_flip_char().
 */
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
{
	struct tty_buffer *tb;
	int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;

	if (!__tty_buffer_request_room(port, 1, flags))
		return 0;

	tb = port->buf.tail;
	if (~tb->flags & TTYB_NORMAL)
		*flag_buf_ptr(tb, tb->used) = flag;
	*char_buf_ptr(tb, tb->used++) = ch;

	return 1;
}
EXPORT_SYMBOL(__tty_insert_flip_char);

/**
 * tty_prepare_flip_string - make room for characters
 * @port: tty port
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data.
 *
 * This is used for drivers that need their own block copy routines into the
 * buffer. There is no guarantee the buffer is a DMA target!
 *
 * Returns: the length available and buffer pointer (@chars) to the space which
 * is now allocated and accounted for as ready for normal characters.
 */
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
			    size_t size)
{
	int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);

	if (likely(space)) {
		struct tty_buffer *tb = port->buf.tail;

		*chars = char_buf_ptr(tb, tb->used);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
		tb->used += space;
	}

	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
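
/*
 * Illustrative sketch (not part of the original file): a driver with its own
 * copy routine reserves linear space, fills it, then pushes. Note the returned
 * space is accounted for immediately (tb->used is already advanced), so the
 * driver should request only as many bytes as it actually has ready.
 * copy_from_hw() is a hypothetical driver helper:
 *
 *	static void example_block_rx(struct tty_port *port, size_t want)
 *	{
 *		unsigned char *dst;
 *		int room = tty_prepare_flip_string(port, &dst, want);
 *
 *		if (room > 0) {
 *			copy_from_hw(dst, room);
 *			tty_flip_buffer_push(port);
 *		}
 *	}
 */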

/**
 * tty_ldisc_receive_buf - forward data to line discipline
 * @ld: line discipline to process input
 * @p: char buffer
 * @f: %TTY_NORMAL, %TTY_BREAK, etc. flags buffer
 * @count: number of bytes to process
 *
 * Callers other than flush_to_ldisc() need to exclude the kworker from
 * concurrent use of the line discipline, see paste_selection().
 *
 * Returns: the number of bytes processed.
 */
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
			  const char *f, int count)
{
	if (ld->ops->receive_buf2)
		count = ld->ops->receive_buf2(ld->tty, p, f, count);
	else {
		count = min_t(int, count, ld->tty->receive_room);
		if (count && ld->ops->receive_buf)
			ld->ops->receive_buf(ld->tty, p, f, count);
	}
	return count;
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);

static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
{
	head->lookahead = max(head->lookahead, head->read);

	while (head) {
		struct tty_buffer *next;
		unsigned int count;

		/*
		 * Paired w/ release in __tty_buffer_request_room();
		 * ensures commit value read is not stale if the head
		 * is advancing to the next buffer.
		 */
		next = smp_load_acquire(&head->next);

		/*
		 * Paired w/ release in __tty_buffer_request_room() or in
		 * tty_buffer_flush(); ensures we see the committed buffer data.
		 */
		count = smp_load_acquire(&head->commit) - head->lookahead;
		if (!count) {
			head = next;
			continue;
		}

		if (port->client_ops->lookahead_buf) {
			unsigned char *p, *f = NULL;

			p = char_buf_ptr(head, head->lookahead);
			if (~head->flags & TTYB_NORMAL)
				f = flag_buf_ptr(head, head->lookahead);

			port->client_ops->lookahead_buf(port, p, f, count);
		}

		head->lookahead += count;
	}
}

static int
receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
	unsigned char *p = char_buf_ptr(head, head->read);
	const char *f = NULL;
	int n;

	if (~head->flags & TTYB_NORMAL)
		f = flag_buf_ptr(head, head->read);

	n = port->client_ops->receive_buf(port, p, f, count);
	if (n > 0)
		memset(p, 0, n);
	return n;
}

/**
 * flush_to_ldisc - flush data from buffer to ldisc
 * @work: tty structure passed from work queue.
 *
 * This routine is called out of the software interrupt to flush data from the
 * buffer chain to the line discipline.
 *
 * The receive_buf() method is single threaded for each tty instance.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
 */
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_port *port = container_of(work, struct tty_port, buf.work);
	struct tty_bufhead *buf = &port->buf;

	mutex_lock(&buf->lock);

	while (1) {
		struct tty_buffer *head = buf->head;
		struct tty_buffer *next;
		int count, rcvd;

		/* Ldisc or user is trying to gain exclusive access */
		if (atomic_read(&buf->priority))
			break;

		/* paired w/ release in __tty_buffer_request_room();
		 * ensures commit value read is not stale if the head
		 * is advancing to the next buffer
		 */
		next = smp_load_acquire(&head->next);
		/* paired w/ release in __tty_buffer_request_room() or in
		 * tty_buffer_flush(); ensures we see the committed buffer data
		 */
		count = smp_load_acquire(&head->commit) - head->read;
		if (!count) {
			if (next == NULL)
				break;
			buf->head = next;
			tty_buffer_free(port, head);
			continue;
		}

		rcvd = receive_buf(port, head, count);
		head->read += rcvd;
		if (rcvd < count)
			lookahead_bufs(port, head);
		if (!rcvd)
			break;

		if (need_resched())
			cond_resched();
	}

	mutex_unlock(&buf->lock);
}

static inline void tty_flip_buffer_commit(struct tty_buffer *tail)
{
	/*
	 * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
	 * buffer data.
	 */
	smp_store_release(&tail->commit, tail->used);
}

/**
 * tty_flip_buffer_push - push terminal buffers
 * @port: tty port to push
 *
 * Queue a push of the terminal flip buffers to the line discipline. Can be
 * called from IRQ/atomic context.
 *
 * In the event of the queue being busy for flipping the work will be held off
 * and retried later.
 */
void tty_flip_buffer_push(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	tty_flip_buffer_commit(buf->tail);
	queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
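
/*
 * Illustrative sketch (not part of the original file): the usual producer
 * pattern is to queue bytes from the interrupt handler and push once per
 * batch; tty_flip_buffer_push() is safe here because the heavy lifting happens
 * in the queued work item. example_irq() and hw_read_byte() are hypothetical:
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		struct tty_port *port = dev_id;
 *		int c;
 *
 *		while ((c = hw_read_byte()) >= 0)
 *			tty_insert_flip_char(port, c, TTY_NORMAL);
 *		tty_flip_buffer_push(port);
 *		return IRQ_HANDLED;
 *	}
 */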

/**
 * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and
 *	push
 * @port: tty port
 * @chars: characters
 * @size: size
 *
 * The function combines tty_insert_flip_string() and tty_flip_buffer_push()
 * with the exception of properly holding the @port->lock.
 *
 * To be used only internally (by pty currently).
 *
 * Returns: the number added.
 */
int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
					   const unsigned char *chars, size_t size)
{
	struct tty_bufhead *buf = &port->buf;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	size = tty_insert_flip_string(port, chars, size);
	if (size)
		tty_flip_buffer_commit(buf->tail);
	spin_unlock_irqrestore(&port->lock, flags);

	queue_work(system_unbound_wq, &buf->work);

	return size;
}

/**
 * tty_buffer_init - prepare a tty buffer structure
 * @port: tty port to initialise
 *
 * Set up the initial state of the buffer management for a tty device. Must be
 * called before the other tty buffer functions are used.
 */
void tty_buffer_init(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	mutex_init(&buf->lock);
	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;
	init_llist_head(&buf->free);
	atomic_set(&buf->mem_used, 0);
	atomic_set(&buf->priority, 0);
	INIT_WORK(&buf->work, flush_to_ldisc);
	buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
}

/**
 * tty_buffer_set_limit - change the tty buffer memory limit
 * @port: tty port to change
 * @limit: memory limit to set
 *
 * Change the tty buffer memory limit.
 *
 * Must be called before the other tty buffer functions are used.
 */
int tty_buffer_set_limit(struct tty_port *port, int limit)
{
	if (limit < MIN_TTYB_SIZE)
		return -EINVAL;
	port->buf.mem_limit = limit;
	return 0;
}
EXPORT_SYMBOL_GPL(tty_buffer_set_limit);
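
/*
 * Illustrative sketch (not part of the original file): a driver expecting
 * sustained high-rate input can raise the flip-buffer memory ceiling right
 * after initialising the port, before any data flows. The 4 MB figure is an
 * arbitrary example value and example_port_setup() is hypothetical:
 *
 *	static void example_port_setup(struct tty_port *port)
 *	{
 *		tty_port_init(port);
 *		tty_buffer_set_limit(port, 4 * 1024 * 1024);
 *	}
 */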

/* slave ptys can claim nested buffer lock when handling BRK and INTR */
void tty_buffer_set_lock_subclass(struct tty_port *port)
{
	lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE);
}

bool tty_buffer_restart_work(struct tty_port *port)
{
	return queue_work(system_unbound_wq, &port->buf.work);
}

bool tty_buffer_cancel_work(struct tty_port *port)
{
	return cancel_work_sync(&port->buf.work);
}

void tty_buffer_flush_work(struct tty_port *port)
{
	flush_work(&port->buf.work);
}