serial-tegra.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * serial_tegra.c
  4. *
  5. * High-speed serial driver for NVIDIA Tegra SoCs
  6. *
  7. * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
  8. *
  9. * Author: Laxman Dewangan <[email protected]>
  10. */
  11. #include <linux/clk.h>
  12. #include <linux/debugfs.h>
  13. #include <linux/delay.h>
  14. #include <linux/dmaengine.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/dmapool.h>
  17. #include <linux/err.h>
  18. #include <linux/io.h>
  19. #include <linux/irq.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/pagemap.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/reset.h>
  26. #include <linux/serial.h>
  27. #include <linux/serial_8250.h>
  28. #include <linux/serial_core.h>
  29. #include <linux/serial_reg.h>
  30. #include <linux/slab.h>
  31. #include <linux/string.h>
  32. #include <linux/termios.h>
  33. #include <linux/tty.h>
  34. #include <linux/tty_flip.h>
  35. #define TEGRA_UART_TYPE "TEGRA_UART"
  36. #define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
  37. #define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
  38. #define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
  39. #define TEGRA_UART_LSR_TXFIFO_FULL 0x100
  40. #define TEGRA_UART_IER_EORD 0x20
  41. #define TEGRA_UART_MCR_RTS_EN 0x40
  42. #define TEGRA_UART_MCR_CTS_EN 0x20
  43. #define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
  44. UART_LSR_PE | UART_LSR_FE)
  45. #define TEGRA_UART_IRDA_CSR 0x08
  46. #define TEGRA_UART_SIR_ENABLED 0x80
  47. #define TEGRA_UART_TX_PIO 1
  48. #define TEGRA_UART_TX_DMA 2
  49. #define TEGRA_UART_MIN_DMA 16
  50. #define TEGRA_UART_FIFO_SIZE 32
  51. /*
  52. * The Tx FIFO trigger level encoding in the Tegra UART is the
  53. * reverse of a conventional UART.
  54. */
  55. #define TEGRA_UART_TX_TRIG_16B 0x00
  56. #define TEGRA_UART_TX_TRIG_8B 0x10
  57. #define TEGRA_UART_TX_TRIG_4B 0x20
  58. #define TEGRA_UART_TX_TRIG_1B 0x30
  59. #define TEGRA_UART_MAXIMUM 8
  60. /* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
  61. #define TEGRA_UART_DEFAULT_BAUD 115200
  62. #define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
  63. /* Tx transfer mode */
  64. #define TEGRA_TX_PIO 1
  65. #define TEGRA_TX_DMA 2
  66. #define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
  67. /**
  68. * struct tegra_uart_chip_data: SOC specific data.
  69. *
  70. * @tx_fifo_full_status: Status flag available for checking tx fifo full.
  71. * @allow_txfifo_reset_fifo_mode: Whether the Tx FIFO may be reset while
  72. * FIFO mode is enabled. Tegra30 does not allow this.
  73. * @support_clk_src_div: Whether the clock source supports the clock divider.
  74. * @fifo_mode_enable_status: Whether the FIFO mode enable status can be checked.
  75. * @uart_max_port: Maximum number of UART ports
  76. * @max_dma_burst_bytes: Maximum size of DMA bursts
  77. * @error_tolerance_low_range: Lowest number in the error tolerance range
  78. * @error_tolerance_high_range: Highest number in the error tolerance range
  79. */
  80. struct tegra_uart_chip_data {
  81. bool tx_fifo_full_status;
  82. bool allow_txfifo_reset_fifo_mode;
  83. bool support_clk_src_div;
  84. bool fifo_mode_enable_status;
  85. int uart_max_port;
  86. int max_dma_burst_bytes;
  87. int error_tolerance_low_range;
  88. int error_tolerance_high_range;
  89. };
  90. struct tegra_baud_tolerance {
  91. u32 lower_range_baud;
  92. u32 upper_range_baud;
  93. s32 tolerance;
  94. };
  95. struct tegra_uart_port {
  96. struct uart_port uport;
  97. const struct tegra_uart_chip_data *cdata;
  98. struct clk *uart_clk;
  99. struct reset_control *rst;
  100. unsigned int current_baud;
  101. /* Register shadow */
  102. unsigned long fcr_shadow;
  103. unsigned long mcr_shadow;
  104. unsigned long lcr_shadow;
  105. unsigned long ier_shadow;
  106. bool rts_active;
  107. int tx_in_progress;
  108. unsigned int tx_bytes;
  109. bool enable_modem_interrupt;
  110. bool rx_timeout;
  111. int rx_in_progress;
  112. int symb_bit;
  113. struct dma_chan *rx_dma_chan;
  114. struct dma_chan *tx_dma_chan;
  115. dma_addr_t rx_dma_buf_phys;
  116. dma_addr_t tx_dma_buf_phys;
  117. unsigned char *rx_dma_buf_virt;
  118. unsigned char *tx_dma_buf_virt;
  119. struct dma_async_tx_descriptor *tx_dma_desc;
  120. struct dma_async_tx_descriptor *rx_dma_desc;
  121. dma_cookie_t tx_cookie;
  122. dma_cookie_t rx_cookie;
  123. unsigned int tx_bytes_requested;
  124. unsigned int rx_bytes_requested;
  125. struct tegra_baud_tolerance *baud_tolerance;
  126. int n_adjustable_baud_rates;
  127. int required_rate;
  128. int configured_rate;
  129. bool use_rx_pio;
  130. bool use_tx_pio;
  131. bool rx_dma_active;
  132. };
  133. static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
  134. static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
  135. static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
  136. bool dma_to_memory);
  137. static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
  138. unsigned long reg)
  139. {
  140. return readl(tup->uport.membase + (reg << tup->uport.regshift));
  141. }
  142. static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
  143. unsigned long reg)
  144. {
  145. writel(val, tup->uport.membase + (reg << tup->uport.regshift));
  146. }
  147. static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
  148. {
  149. return container_of(u, struct tegra_uart_port, uport);
  150. }
  151. static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
  152. {
  153. struct tegra_uart_port *tup = to_tegra_uport(u);
  154. /*
  155. * RI - Ring Indicator is reported as active.
  156. * CD/DCD/CAR - Carrier detect is always reported as active. For some
  157. * reason, Linux has several different names for carrier detect.
  158. * DSR - Data Set Ready is reported as active since the hardware
  159. * does not support it.
  160. * CTS - Clear To Send is always reported as active, as the hardware
  161. * handles CTS automatically.
  162. */
  163. if (tup->enable_modem_interrupt)
  164. return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
  165. return TIOCM_CTS;
  166. }
  167. static void set_rts(struct tegra_uart_port *tup, bool active)
  168. {
  169. unsigned long mcr;
  170. mcr = tup->mcr_shadow;
  171. if (active)
  172. mcr |= TEGRA_UART_MCR_RTS_EN;
  173. else
  174. mcr &= ~TEGRA_UART_MCR_RTS_EN;
  175. if (mcr != tup->mcr_shadow) {
  176. tegra_uart_write(tup, mcr, UART_MCR);
  177. tup->mcr_shadow = mcr;
  178. }
  179. }
  180. static void set_dtr(struct tegra_uart_port *tup, bool active)
  181. {
  182. unsigned long mcr;
  183. mcr = tup->mcr_shadow;
  184. if (active)
  185. mcr |= UART_MCR_DTR;
  186. else
  187. mcr &= ~UART_MCR_DTR;
  188. if (mcr != tup->mcr_shadow) {
  189. tegra_uart_write(tup, mcr, UART_MCR);
  190. tup->mcr_shadow = mcr;
  191. }
  192. }
  193. static void set_loopbk(struct tegra_uart_port *tup, bool active)
  194. {
  195. unsigned long mcr = tup->mcr_shadow;
  196. if (active)
  197. mcr |= UART_MCR_LOOP;
  198. else
  199. mcr &= ~UART_MCR_LOOP;
  200. if (mcr != tup->mcr_shadow) {
  201. tegra_uart_write(tup, mcr, UART_MCR);
  202. tup->mcr_shadow = mcr;
  203. }
  204. }
  205. static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
  206. {
  207. struct tegra_uart_port *tup = to_tegra_uport(u);
  208. int enable;
  209. tup->rts_active = !!(mctrl & TIOCM_RTS);
  210. set_rts(tup, tup->rts_active);
  211. enable = !!(mctrl & TIOCM_DTR);
  212. set_dtr(tup, enable);
  213. enable = !!(mctrl & TIOCM_LOOP);
  214. set_loopbk(tup, enable);
  215. }
  216. static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
  217. {
  218. struct tegra_uart_port *tup = to_tegra_uport(u);
  219. unsigned long lcr;
  220. lcr = tup->lcr_shadow;
  221. if (break_ctl)
  222. lcr |= UART_LCR_SBC;
  223. else
  224. lcr &= ~UART_LCR_SBC;
  225. tegra_uart_write(tup, lcr, UART_LCR);
  226. tup->lcr_shadow = lcr;
  227. }
  228. /**
  229. * tegra_uart_wait_cycle_time: Wait for N UART clock periods
  230. *
  231. * @tup: Tegra serial port data structure.
  232. * @cycles: Number of clock periods to wait.
  233. *
  234. * Tegra UARTs are clocked at 16X the baud/bit rate, so one bit period
  235. * corresponds to 16 UART clock periods.
  236. */
  237. static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
  238. unsigned int cycles)
  239. {
  240. if (tup->current_baud)
  241. udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
  242. }
  243. /* Wait for a symbol-time. */
  244. static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
  245. unsigned int syms)
  246. {
  247. if (tup->current_baud)
  248. udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
  249. tup->current_baud));
  250. }
  251. static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
  252. {
  253. unsigned long iir;
  254. unsigned int tmout = 100;
  255. do {
  256. iir = tegra_uart_read(tup, UART_IIR);
  257. if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
  258. return 0;
  259. udelay(1);
  260. } while (--tmout);
  261. return -ETIMEDOUT;
  262. }
  263. static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
  264. {
  265. unsigned long fcr = tup->fcr_shadow;
  266. unsigned int lsr, tmout = 10000;
  267. if (tup->rts_active)
  268. set_rts(tup, false);
  269. if (tup->cdata->allow_txfifo_reset_fifo_mode) {
  270. fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
  271. tegra_uart_write(tup, fcr, UART_FCR);
  272. } else {
  273. fcr &= ~UART_FCR_ENABLE_FIFO;
  274. tegra_uart_write(tup, fcr, UART_FCR);
  275. udelay(60);
  276. fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
  277. tegra_uart_write(tup, fcr, UART_FCR);
  278. fcr |= UART_FCR_ENABLE_FIFO;
  279. tegra_uart_write(tup, fcr, UART_FCR);
  280. if (tup->cdata->fifo_mode_enable_status)
  281. tegra_uart_wait_fifo_mode_enabled(tup);
  282. }
  283. /* Dummy read to ensure the write is posted */
  284. tegra_uart_read(tup, UART_SCR);
  285. /*
  286. * For all tegra devices (up to t210), there is a hardware issue that
  287. * requires software to wait for 32 UART clock periods for the flush
  288. * to propagate, otherwise data could be lost.
  289. */
  290. tegra_uart_wait_cycle_time(tup, 32);
  291. do {
  292. lsr = tegra_uart_read(tup, UART_LSR);
  293. if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
  294. break;
  295. udelay(1);
  296. } while (--tmout);
  297. if (tup->rts_active)
  298. set_rts(tup, true);
  299. }
  300. static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
  301. unsigned int baud, long rate)
  302. {
  303. int i;
  304. for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
  305. if (baud >= tup->baud_tolerance[i].lower_range_baud &&
  306. baud <= tup->baud_tolerance[i].upper_range_baud)
  307. return (rate + (rate *
  308. tup->baud_tolerance[i].tolerance) / 10000);
  309. }
  310. return rate;
  311. }
  312. static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
  313. {
  314. long diff;
  315. diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
  316. / tup->required_rate;
  317. if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
  318. diff > (tup->cdata->error_tolerance_high_range * 100)) {
  319. dev_err(tup->uport.dev,
  320. "configured baud rate is out of range by %ld", diff);
  321. return -EIO;
  322. }
  323. return 0;
  324. }
  325. static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
  326. {
  327. unsigned long rate;
  328. unsigned int divisor;
  329. unsigned long lcr;
  330. unsigned long flags;
  331. int ret;
  332. if (tup->current_baud == baud)
  333. return 0;
  334. if (tup->cdata->support_clk_src_div) {
  335. rate = baud * 16;
  336. tup->required_rate = rate;
  337. if (tup->n_adjustable_baud_rates)
  338. rate = tegra_get_tolerance_rate(tup, baud, rate);
  339. ret = clk_set_rate(tup->uart_clk, rate);
  340. if (ret < 0) {
  341. dev_err(tup->uport.dev,
  342. "clk_set_rate() failed for rate %lu\n", rate);
  343. return ret;
  344. }
  345. tup->configured_rate = clk_get_rate(tup->uart_clk);
  346. divisor = 1;
  347. ret = tegra_check_rate_in_range(tup);
  348. if (ret < 0)
  349. return ret;
  350. } else {
  351. rate = clk_get_rate(tup->uart_clk);
  352. divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
  353. }
  354. spin_lock_irqsave(&tup->uport.lock, flags);
  355. lcr = tup->lcr_shadow;
  356. lcr |= UART_LCR_DLAB;
  357. tegra_uart_write(tup, lcr, UART_LCR);
  358. tegra_uart_write(tup, divisor & 0xFF, UART_TX);
  359. tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
  360. lcr &= ~UART_LCR_DLAB;
  361. tegra_uart_write(tup, lcr, UART_LCR);
  362. /* Dummy read to ensure the write is posted */
  363. tegra_uart_read(tup, UART_SCR);
  364. spin_unlock_irqrestore(&tup->uport.lock, flags);
  365. tup->current_baud = baud;
  366. /* wait two character intervals at new rate */
  367. tegra_uart_wait_sym_time(tup, 2);
  368. return 0;
  369. }
  370. static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
  371. unsigned long lsr)
  372. {
  373. char flag = TTY_NORMAL;
  374. if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
  375. if (lsr & UART_LSR_OE) {
  376. /* Overrun error */
  377. flag = TTY_OVERRUN;
  378. tup->uport.icount.overrun++;
  379. dev_dbg(tup->uport.dev, "Got overrun errors\n");
  380. } else if (lsr & UART_LSR_PE) {
  381. /* Parity error */
  382. flag = TTY_PARITY;
  383. tup->uport.icount.parity++;
  384. dev_dbg(tup->uport.dev, "Got Parity errors\n");
  385. } else if (lsr & UART_LSR_FE) {
  386. flag = TTY_FRAME;
  387. tup->uport.icount.frame++;
  388. dev_dbg(tup->uport.dev, "Got frame errors\n");
  389. } else if (lsr & UART_LSR_BI) {
  390. /*
  391. * Break error
  392. * If FIFO read error without any data, reset Rx FIFO
  393. */
  394. if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
  395. tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
  396. if (tup->uport.ignore_status_mask & UART_LSR_BI)
  397. return TTY_BREAK;
  398. flag = TTY_BREAK;
  399. tup->uport.icount.brk++;
  400. dev_dbg(tup->uport.dev, "Got Break\n");
  401. }
  402. uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
  403. }
  404. return flag;
  405. }
  406. static int tegra_uart_request_port(struct uart_port *u)
  407. {
  408. return 0;
  409. }
  410. static void tegra_uart_release_port(struct uart_port *u)
  411. {
  412. /* Nothing to do here */
  413. }
  414. static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
  415. {
  416. struct circ_buf *xmit = &tup->uport.state->xmit;
  417. int i;
  418. for (i = 0; i < max_bytes; i++) {
  419. BUG_ON(uart_circ_empty(xmit));
  420. if (tup->cdata->tx_fifo_full_status) {
  421. unsigned long lsr = tegra_uart_read(tup, UART_LSR);
  422. if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
  423. break;
  424. }
  425. tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
  426. xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
  427. tup->uport.icount.tx++;
  428. }
  429. }
  430. static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
  431. unsigned int bytes)
  432. {
  433. if (bytes > TEGRA_UART_MIN_DMA)
  434. bytes = TEGRA_UART_MIN_DMA;
  435. tup->tx_in_progress = TEGRA_UART_TX_PIO;
  436. tup->tx_bytes = bytes;
  437. tup->ier_shadow |= UART_IER_THRI;
  438. tegra_uart_write(tup, tup->ier_shadow, UART_IER);
  439. }
  440. static void tegra_uart_tx_dma_complete(void *args)
  441. {
  442. struct tegra_uart_port *tup = args;
  443. struct circ_buf *xmit = &tup->uport.state->xmit;
  444. struct dma_tx_state state;
  445. unsigned long flags;
  446. unsigned int count;
  447. dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
  448. count = tup->tx_bytes_requested - state.residue;
  449. async_tx_ack(tup->tx_dma_desc);
  450. spin_lock_irqsave(&tup->uport.lock, flags);
  451. uart_xmit_advance(&tup->uport, count);
  452. tup->tx_in_progress = 0;
  453. if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
  454. uart_write_wakeup(&tup->uport);
  455. tegra_uart_start_next_tx(tup);
  456. spin_unlock_irqrestore(&tup->uport.lock, flags);
  457. }
  458. static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
  459. unsigned long count)
  460. {
  461. struct circ_buf *xmit = &tup->uport.state->xmit;
  462. dma_addr_t tx_phys_addr;
  463. tup->tx_bytes = count & ~(0xF);
  464. tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
  465. dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
  466. tup->tx_bytes, DMA_TO_DEVICE);
  467. tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
  468. tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
  469. DMA_PREP_INTERRUPT);
  470. if (!tup->tx_dma_desc) {
  471. dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
  472. return -EIO;
  473. }
  474. tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
  475. tup->tx_dma_desc->callback_param = tup;
  476. tup->tx_in_progress = TEGRA_UART_TX_DMA;
  477. tup->tx_bytes_requested = tup->tx_bytes;
  478. tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
  479. dma_async_issue_pending(tup->tx_dma_chan);
  480. return 0;
  481. }
  482. static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
  483. {
  484. unsigned long tail;
  485. unsigned long count;
  486. struct circ_buf *xmit = &tup->uport.state->xmit;
  487. if (!tup->current_baud)
  488. return;
  489. tail = (unsigned long)&xmit->buf[xmit->tail];
  490. count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
  491. if (!count)
  492. return;
  493. if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
  494. tegra_uart_start_pio_tx(tup, count);
  495. else if (BYTES_TO_ALIGN(tail) > 0)
  496. tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
  497. else
  498. tegra_uart_start_tx_dma(tup, count);
  499. }
  500. /* Called by serial core driver with u->lock taken. */
  501. static void tegra_uart_start_tx(struct uart_port *u)
  502. {
  503. struct tegra_uart_port *tup = to_tegra_uport(u);
  504. struct circ_buf *xmit = &u->state->xmit;
  505. if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
  506. tegra_uart_start_next_tx(tup);
  507. }
  508. static unsigned int tegra_uart_tx_empty(struct uart_port *u)
  509. {
  510. struct tegra_uart_port *tup = to_tegra_uport(u);
  511. unsigned int ret = 0;
  512. unsigned long flags;
  513. spin_lock_irqsave(&u->lock, flags);
  514. if (!tup->tx_in_progress) {
  515. unsigned long lsr = tegra_uart_read(tup, UART_LSR);
  516. if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
  517. ret = TIOCSER_TEMT;
  518. }
  519. spin_unlock_irqrestore(&u->lock, flags);
  520. return ret;
  521. }
  522. static void tegra_uart_stop_tx(struct uart_port *u)
  523. {
  524. struct tegra_uart_port *tup = to_tegra_uport(u);
  525. struct dma_tx_state state;
  526. unsigned int count;
  527. if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
  528. return;
  529. dmaengine_pause(tup->tx_dma_chan);
  530. dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
  531. dmaengine_terminate_all(tup->tx_dma_chan);
  532. count = tup->tx_bytes_requested - state.residue;
  533. async_tx_ack(tup->tx_dma_desc);
  534. uart_xmit_advance(&tup->uport, count);
  535. tup->tx_in_progress = 0;
  536. }
  537. static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
  538. {
  539. struct circ_buf *xmit = &tup->uport.state->xmit;
  540. tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
  541. tup->tx_in_progress = 0;
  542. if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
  543. uart_write_wakeup(&tup->uport);
  544. tegra_uart_start_next_tx(tup);
  545. }
  546. static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
  547. struct tty_port *port)
  548. {
  549. do {
  550. char flag = TTY_NORMAL;
  551. unsigned long lsr = 0;
  552. unsigned char ch;
  553. lsr = tegra_uart_read(tup, UART_LSR);
  554. if (!(lsr & UART_LSR_DR))
  555. break;
  556. flag = tegra_uart_decode_rx_error(tup, lsr);
  557. if (flag != TTY_NORMAL)
  558. continue;
  559. ch = (unsigned char) tegra_uart_read(tup, UART_RX);
  560. tup->uport.icount.rx++;
  561. if (uart_handle_sysrq_char(&tup->uport, ch))
  562. continue;
  563. if (tup->uport.ignore_status_mask & UART_LSR_DR)
  564. continue;
  565. tty_insert_flip_char(port, ch, flag);
  566. } while (1);
  567. }
  568. static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
  569. struct tty_port *port,
  570. unsigned int count)
  571. {
  572. int copied;
  573. /* If count is zero, then there is no data to be copied */
  574. if (!count)
  575. return;
  576. tup->uport.icount.rx += count;
  577. if (tup->uport.ignore_status_mask & UART_LSR_DR)
  578. return;
  579. dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
  580. count, DMA_FROM_DEVICE);
  581. copied = tty_insert_flip_string(port,
  582. ((unsigned char *)(tup->rx_dma_buf_virt)), count);
  583. if (copied != count) {
  584. WARN_ON(1);
  585. dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
  586. }
  587. dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
  588. count, DMA_TO_DEVICE);
  589. }
  590. static void do_handle_rx_pio(struct tegra_uart_port *tup)
  591. {
  592. struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
  593. struct tty_port *port = &tup->uport.state->port;
  594. tegra_uart_handle_rx_pio(tup, port);
  595. if (tty) {
  596. tty_flip_buffer_push(port);
  597. tty_kref_put(tty);
  598. }
  599. }
  600. static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
  601. unsigned int residue)
  602. {
  603. struct tty_port *port = &tup->uport.state->port;
  604. unsigned int count;
  605. async_tx_ack(tup->rx_dma_desc);
  606. count = tup->rx_bytes_requested - residue;
  607. /* If we are here, DMA is stopped */
  608. tegra_uart_copy_rx_to_tty(tup, port, count);
  609. do_handle_rx_pio(tup);
  610. }
  611. static void tegra_uart_rx_dma_complete(void *args)
  612. {
  613. struct tegra_uart_port *tup = args;
  614. struct uart_port *u = &tup->uport;
  615. unsigned long flags;
  616. struct dma_tx_state state;
  617. enum dma_status status;
  618. spin_lock_irqsave(&u->lock, flags);
  619. status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
  620. if (status == DMA_IN_PROGRESS) {
  621. dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
  622. goto done;
  623. }
  624. /* Deactivate flow control to stop sender */
  625. if (tup->rts_active)
  626. set_rts(tup, false);
  627. tup->rx_dma_active = false;
  628. tegra_uart_rx_buffer_push(tup, 0);
  629. tegra_uart_start_rx_dma(tup);
  630. /* Activate flow control to start transfer */
  631. if (tup->rts_active)
  632. set_rts(tup, true);
  633. done:
  634. spin_unlock_irqrestore(&u->lock, flags);
  635. }
  636. static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
  637. {
  638. struct dma_tx_state state;
  639. if (!tup->rx_dma_active) {
  640. do_handle_rx_pio(tup);
  641. return;
  642. }
  643. dmaengine_pause(tup->rx_dma_chan);
  644. dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
  645. dmaengine_terminate_all(tup->rx_dma_chan);
  646. tegra_uart_rx_buffer_push(tup, state.residue);
  647. tup->rx_dma_active = false;
  648. }
  649. static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
  650. {
  651. /* Deactivate flow control to stop sender */
  652. if (tup->rts_active)
  653. set_rts(tup, false);
  654. tegra_uart_terminate_rx_dma(tup);
  655. if (tup->rts_active)
  656. set_rts(tup, true);
  657. }
  658. static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
  659. {
  660. unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
  661. if (tup->rx_dma_active)
  662. return 0;
  663. tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
  664. tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
  665. DMA_PREP_INTERRUPT);
  666. if (!tup->rx_dma_desc) {
  667. dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
  668. return -EIO;
  669. }
  670. tup->rx_dma_active = true;
  671. tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
  672. tup->rx_dma_desc->callback_param = tup;
  673. tup->rx_bytes_requested = count;
  674. tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
  675. dma_async_issue_pending(tup->rx_dma_chan);
  676. return 0;
  677. }
  678. static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
  679. {
  680. struct tegra_uart_port *tup = to_tegra_uport(u);
  681. unsigned long msr;
  682. msr = tegra_uart_read(tup, UART_MSR);
  683. if (!(msr & UART_MSR_ANY_DELTA))
  684. return;
  685. if (msr & UART_MSR_TERI)
  686. tup->uport.icount.rng++;
  687. if (msr & UART_MSR_DDSR)
  688. tup->uport.icount.dsr++;
  689. /* We may only get DDCD during HW init and reset */
  690. if (msr & UART_MSR_DDCD)
  691. uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
  692. /* uart_handle_cts_change() will start/stop TX accordingly */
  693. if (msr & UART_MSR_DCTS)
  694. uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
  695. }
  696. static irqreturn_t tegra_uart_isr(int irq, void *data)
  697. {
  698. struct tegra_uart_port *tup = data;
  699. struct uart_port *u = &tup->uport;
  700. unsigned long iir;
  701. unsigned long ier;
  702. bool is_rx_start = false;
  703. bool is_rx_int = false;
  704. unsigned long flags;
  705. spin_lock_irqsave(&u->lock, flags);
  706. while (1) {
  707. iir = tegra_uart_read(tup, UART_IIR);
  708. if (iir & UART_IIR_NO_INT) {
  709. if (!tup->use_rx_pio && is_rx_int) {
  710. tegra_uart_handle_rx_dma(tup);
  711. if (tup->rx_in_progress) {
  712. ier = tup->ier_shadow;
  713. ier |= (UART_IER_RLSI | UART_IER_RTOIE |
  714. TEGRA_UART_IER_EORD | UART_IER_RDI);
  715. tup->ier_shadow = ier;
  716. tegra_uart_write(tup, ier, UART_IER);
  717. }
  718. } else if (is_rx_start) {
  719. tegra_uart_start_rx_dma(tup);
  720. }
  721. spin_unlock_irqrestore(&u->lock, flags);
  722. return IRQ_HANDLED;
  723. }
  724. switch ((iir >> 1) & 0x7) {
  725. case 0: /* Modem signal change interrupt */
  726. tegra_uart_handle_modem_signal_change(u);
  727. break;
  728. case 1: /* Transmit interrupt only triggered when using PIO */
  729. tup->ier_shadow &= ~UART_IER_THRI;
  730. tegra_uart_write(tup, tup->ier_shadow, UART_IER);
  731. tegra_uart_handle_tx_pio(tup);
  732. break;
  733. case 4: /* End of data */
  734. case 6: /* Rx timeout */
  735. if (!tup->use_rx_pio) {
  736. is_rx_int = tup->rx_in_progress;
  737. /* Disable Rx interrupts */
  738. ier = tup->ier_shadow;
  739. ier &= ~(UART_IER_RDI | UART_IER_RLSI |
  740. UART_IER_RTOIE | TEGRA_UART_IER_EORD);
  741. tup->ier_shadow = ier;
  742. tegra_uart_write(tup, ier, UART_IER);
  743. break;
  744. }
  745. fallthrough;
  746. case 2: /* Receive */
  747. if (!tup->use_rx_pio) {
  748. is_rx_start = tup->rx_in_progress;
  749. tup->ier_shadow &= ~UART_IER_RDI;
  750. tegra_uart_write(tup, tup->ier_shadow,
  751. UART_IER);
  752. } else {
  753. do_handle_rx_pio(tup);
  754. }
  755. break;
  756. case 3: /* Receive error */
  757. tegra_uart_decode_rx_error(tup,
  758. tegra_uart_read(tup, UART_LSR));
  759. break;
  760. case 5: /* break nothing to handle */
  761. case 7: /* break nothing to handle */
  762. break;
  763. }
  764. }
  765. }
  766. static void tegra_uart_stop_rx(struct uart_port *u)
  767. {
  768. struct tegra_uart_port *tup = to_tegra_uport(u);
  769. struct tty_port *port = &tup->uport.state->port;
  770. unsigned long ier;
  771. if (tup->rts_active)
  772. set_rts(tup, false);
  773. if (!tup->rx_in_progress)
  774. return;
  775. tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */
  776. ier = tup->ier_shadow;
  777. ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
  778. TEGRA_UART_IER_EORD);
  779. tup->ier_shadow = ier;
  780. tegra_uart_write(tup, ier, UART_IER);
  781. tup->rx_in_progress = 0;
  782. if (!tup->use_rx_pio)
  783. tegra_uart_terminate_rx_dma(tup);
  784. else
  785. tegra_uart_handle_rx_pio(tup, port);
  786. }
  787. static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
  788. {
  789. unsigned long flags;
  790. unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
  791. unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
  792. unsigned long wait_time;
  793. unsigned long lsr;
  794. unsigned long msr;
  795. unsigned long mcr;
  796. /* Disable interrupts */
  797. tegra_uart_write(tup, 0, UART_IER);
  798. lsr = tegra_uart_read(tup, UART_LSR);
  799. if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
  800. msr = tegra_uart_read(tup, UART_MSR);
  801. mcr = tegra_uart_read(tup, UART_MCR);
  802. if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
  803. dev_err(tup->uport.dev,
  804. "Tx Fifo not empty, CTS disabled, waiting\n");
  805. /* Wait for Tx fifo to be empty */
  806. while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
  807. wait_time = min(fifo_empty_time, 100lu);
  808. udelay(wait_time);
  809. fifo_empty_time -= wait_time;
  810. if (!fifo_empty_time) {
  811. msr = tegra_uart_read(tup, UART_MSR);
  812. mcr = tegra_uart_read(tup, UART_MCR);
  813. if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
  814. (msr & UART_MSR_CTS))
  815. dev_err(tup->uport.dev,
  816. "Slave not ready\n");
  817. break;
  818. }
  819. lsr = tegra_uart_read(tup, UART_LSR);
  820. }
  821. }
  822. spin_lock_irqsave(&tup->uport.lock, flags);
  823. /* Reset the Rx and Tx FIFOs */
  824. tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
  825. tup->current_baud = 0;
  826. spin_unlock_irqrestore(&tup->uport.lock, flags);
  827. tup->rx_in_progress = 0;
  828. tup->tx_in_progress = 0;
  829. if (!tup->use_rx_pio)
  830. tegra_uart_dma_channel_free(tup, true);
  831. if (!tup->use_tx_pio)
  832. tegra_uart_dma_channel_free(tup, false);
  833. clk_disable_unprepare(tup->uart_clk);
  834. }
  835. static int tegra_uart_hw_init(struct tegra_uart_port *tup)
  836. {
  837. int ret;
  838. tup->fcr_shadow = 0;
  839. tup->mcr_shadow = 0;
  840. tup->lcr_shadow = 0;
  841. tup->ier_shadow = 0;
  842. tup->current_baud = 0;
  843. ret = clk_prepare_enable(tup->uart_clk);
  844. if (ret) {
  845. dev_err(tup->uport.dev, "could not enable clk\n");
  846. return ret;
  847. }
  848. /* Reset the UART controller to clear all previous status.*/
  849. reset_control_assert(tup->rst);
  850. udelay(10);
  851. reset_control_deassert(tup->rst);
  852. tup->rx_in_progress = 0;
  853. tup->tx_in_progress = 0;
  854. /*
  855. * Set the trigger level
  856. *
  857. * For PIO mode:
  858. *
  859. * For receive, this will interrupt the CPU after that many bytes have
  860. * been received; the remaining bytes are signalled by the receive
  861. * timeout interrupt. The Rx high watermark is set to 4.
  862. *
  863. * For transmit, if the transmit interrupt is enabled, this will
  864. * interrupt the CPU when the number of entries in the FIFO reaches the
  865. * low watermark. The Tx low watermark is set to 16 bytes.
  866. *
  867. * For DMA mode:
  868. *
  869. * Set the Tx trigger to 16. This should match the DMA burst size
  870. * programmed in the DMA registers.
  871. */
  872. tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
  873. if (tup->use_rx_pio) {
  874. tup->fcr_shadow |= UART_FCR_R_TRIG_11;
  875. } else {
  876. if (tup->cdata->max_dma_burst_bytes == 8)
  877. tup->fcr_shadow |= UART_FCR_R_TRIG_10;
  878. else
  879. tup->fcr_shadow |= UART_FCR_R_TRIG_01;
  880. }
  881. tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
  882. tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
  883. /* Dummy read to ensure the write is posted */
  884. tegra_uart_read(tup, UART_SCR);
  885. if (tup->cdata->fifo_mode_enable_status) {
  886. ret = tegra_uart_wait_fifo_mode_enabled(tup);
  887. if (ret < 0) {
  888. clk_disable_unprepare(tup->uart_clk);
  889. dev_err(tup->uport.dev,
  890. "Failed to enable FIFO mode: %d\n", ret);
  891. return ret;
  892. }
  893. } else {
  894. /*
  895. * For all tegra devices (up to t210), there is a hardware
  896. * issue that requires software to wait for 3 UART clock
  897. * periods after enabling the TX fifo, otherwise data could
  898. * be lost.
  899. */
  900. tegra_uart_wait_cycle_time(tup, 3);
  901. }
  902. /*
  903. * Initialize the UART with default configuration
  904. * (115200, N, 8, 1) so that the receive DMA buffer may be
  905. * enqueued
  906. */
  907. ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
  908. if (ret < 0) {
  909. clk_disable_unprepare(tup->uart_clk);
  910. dev_err(tup->uport.dev, "Failed to set baud rate\n");
  911. return ret;
  912. }
  913. if (!tup->use_rx_pio) {
  914. tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
  915. tup->fcr_shadow |= UART_FCR_DMA_SELECT;
  916. tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
  917. } else {
  918. tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
  919. }
  920. tup->rx_in_progress = 1;
  921. /*
  922. * Enable IE_RXS for the receive status interrupts like line errors.
  923. * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
  924. *
  925. * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs when
  926. * data is sitting in the FIFO but could not be transferred to the
  927. * DMA because the DMA size alignment (4 bytes) is not met. EORD is
  928. * triggered when the incoming data stream pauses for 4 character
  929. * times.
  930. *
  931. * For pauses in data that is not aligned to 4 bytes, we get
  932. * both EORD and RX_TIMEOUT - software sees RX_TIMEOUT first and
  933. * then EORD.
  934. */
  935. tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
  936. /*
  937. * If using DMA mode, enable EORD interrupt to notify about RX
  938. * completion.
  939. */
  940. if (!tup->use_rx_pio)
  941. tup->ier_shadow |= TEGRA_UART_IER_EORD;
  942. tegra_uart_write(tup, tup->ier_shadow, UART_IER);
  943. return 0;
  944. }
  945. static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
  946. bool dma_to_memory)
  947. {
  948. if (dma_to_memory) {
  949. dmaengine_terminate_all(tup->rx_dma_chan);
  950. dma_release_channel(tup->rx_dma_chan);
  951. dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
  952. tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
  953. tup->rx_dma_chan = NULL;
  954. tup->rx_dma_buf_phys = 0;
  955. tup->rx_dma_buf_virt = NULL;
  956. } else {
  957. dmaengine_terminate_all(tup->tx_dma_chan);
  958. dma_release_channel(tup->tx_dma_chan);
  959. dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
  960. UART_XMIT_SIZE, DMA_TO_DEVICE);
  961. tup->tx_dma_chan = NULL;
  962. tup->tx_dma_buf_phys = 0;
  963. tup->tx_dma_buf_virt = NULL;
  964. }
  965. }
  966. static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
  967. bool dma_to_memory)
  968. {
  969. struct dma_chan *dma_chan;
  970. unsigned char *dma_buf;
  971. dma_addr_t dma_phys;
  972. int ret;
  973. struct dma_slave_config dma_sconfig;
  974. dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
  975. if (IS_ERR(dma_chan)) {
  976. ret = PTR_ERR(dma_chan);
  977. dev_err(tup->uport.dev,
  978. "DMA channel alloc failed: %d\n", ret);
  979. return ret;
  980. }
  981. if (dma_to_memory) {
  982. dma_buf = dma_alloc_coherent(tup->uport.dev,
  983. TEGRA_UART_RX_DMA_BUFFER_SIZE,
  984. &dma_phys, GFP_KERNEL);
  985. if (!dma_buf) {
  986. dev_err(tup->uport.dev,
  987. "Not able to allocate the dma buffer\n");
  988. dma_release_channel(dma_chan);
  989. return -ENOMEM;
  990. }
  991. dma_sync_single_for_device(tup->uport.dev, dma_phys,
  992. TEGRA_UART_RX_DMA_BUFFER_SIZE,
  993. DMA_TO_DEVICE);
  994. dma_sconfig.src_addr = tup->uport.mapbase;
  995. dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  996. dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
  997. tup->rx_dma_chan = dma_chan;
  998. tup->rx_dma_buf_virt = dma_buf;
  999. tup->rx_dma_buf_phys = dma_phys;
  1000. } else {
  1001. dma_phys = dma_map_single(tup->uport.dev,
  1002. tup->uport.state->xmit.buf, UART_XMIT_SIZE,
  1003. DMA_TO_DEVICE);
  1004. if (dma_mapping_error(tup->uport.dev, dma_phys)) {
  1005. dev_err(tup->uport.dev, "dma_map_single tx failed\n");
  1006. dma_release_channel(dma_chan);
  1007. return -ENOMEM;
  1008. }
  1009. dma_buf = tup->uport.state->xmit.buf;
  1010. dma_sconfig.dst_addr = tup->uport.mapbase;
  1011. dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  1012. dma_sconfig.dst_maxburst = 16;
  1013. tup->tx_dma_chan = dma_chan;
  1014. tup->tx_dma_buf_virt = dma_buf;
  1015. tup->tx_dma_buf_phys = dma_phys;
  1016. }
  1017. ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
  1018. if (ret < 0) {
  1019. dev_err(tup->uport.dev,
  1020. "Dma slave config failed, err = %d\n", ret);
  1021. tegra_uart_dma_channel_free(tup, dma_to_memory);
  1022. return ret;
  1023. }
  1024. return 0;
  1025. }
  1026. static int tegra_uart_startup(struct uart_port *u)
  1027. {
  1028. struct tegra_uart_port *tup = to_tegra_uport(u);
  1029. int ret;
  1030. if (!tup->use_tx_pio) {
  1031. ret = tegra_uart_dma_channel_allocate(tup, false);
  1032. if (ret < 0) {
  1033. dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
  1034. ret);
  1035. return ret;
  1036. }
  1037. }
  1038. if (!tup->use_rx_pio) {
  1039. ret = tegra_uart_dma_channel_allocate(tup, true);
  1040. if (ret < 0) {
  1041. dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
  1042. ret);
  1043. goto fail_rx_dma;
  1044. }
  1045. }
  1046. ret = tegra_uart_hw_init(tup);
  1047. if (ret < 0) {
  1048. dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
  1049. goto fail_hw_init;
  1050. }
  1051. ret = request_irq(u->irq, tegra_uart_isr, 0,
  1052. dev_name(u->dev), tup);
  1053. if (ret < 0) {
  1054. dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
  1055. goto fail_request_irq;
  1056. }
  1057. return 0;
  1058. fail_request_irq:
  1059. /* tup->uart_clk is already enabled in tegra_uart_hw_init */
  1060. clk_disable_unprepare(tup->uart_clk);
  1061. fail_hw_init:
  1062. if (!tup->use_rx_pio)
  1063. tegra_uart_dma_channel_free(tup, true);
  1064. fail_rx_dma:
  1065. if (!tup->use_tx_pio)
  1066. tegra_uart_dma_channel_free(tup, false);
  1067. return ret;
  1068. }
  1069. /*
  1070. * Flush any TX data submitted for DMA and PIO. Called when the
  1071. * TX circular buffer is reset.
  1072. */
  1073. static void tegra_uart_flush_buffer(struct uart_port *u)
  1074. {
  1075. struct tegra_uart_port *tup = to_tegra_uport(u);
  1076. tup->tx_bytes = 0;
  1077. if (tup->tx_dma_chan)
  1078. dmaengine_terminate_all(tup->tx_dma_chan);
  1079. }
  1080. static void tegra_uart_shutdown(struct uart_port *u)
  1081. {
  1082. struct tegra_uart_port *tup = to_tegra_uport(u);
  1083. tegra_uart_hw_deinit(tup);
  1084. free_irq(u->irq, tup);
  1085. }
  1086. static void tegra_uart_enable_ms(struct uart_port *u)
  1087. {
  1088. struct tegra_uart_port *tup = to_tegra_uport(u);
  1089. if (tup->enable_modem_interrupt) {
  1090. tup->ier_shadow |= UART_IER_MSI;
  1091. tegra_uart_write(tup, tup->ier_shadow, UART_IER);
  1092. }
  1093. }
  1094. static void tegra_uart_set_termios(struct uart_port *u,
  1095. struct ktermios *termios,
  1096. const struct ktermios *oldtermios)
  1097. {
  1098. struct tegra_uart_port *tup = to_tegra_uport(u);
  1099. unsigned int baud;
  1100. unsigned long flags;
  1101. unsigned int lcr;
  1102. unsigned char char_bits;
  1103. struct clk *parent_clk = clk_get_parent(tup->uart_clk);
  1104. unsigned long parent_clk_rate = clk_get_rate(parent_clk);
  1105. int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
  1106. int ret;
  1107. max_divider *= 16;
  1108. spin_lock_irqsave(&u->lock, flags);
  1109. /* The configuration is being changed, so it is safe to stop any Rx now */
  1110. if (tup->rts_active)
  1111. set_rts(tup, false);
  1112. /* Clear all interrupts as configuration is going to be changed */
  1113. tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
  1114. tegra_uart_read(tup, UART_IER);
  1115. tegra_uart_write(tup, 0, UART_IER);
  1116. tegra_uart_read(tup, UART_IER);
  1117. /* Parity */
  1118. lcr = tup->lcr_shadow;
  1119. lcr &= ~UART_LCR_PARITY;
  1120. /* CMSPAR isn't supported by this driver */
  1121. termios->c_cflag &= ~CMSPAR;
  1122. if ((termios->c_cflag & PARENB) == PARENB) {
  1123. if (termios->c_cflag & PARODD) {
  1124. lcr |= UART_LCR_PARITY;
  1125. lcr &= ~UART_LCR_EPAR;
  1126. lcr &= ~UART_LCR_SPAR;
  1127. } else {
  1128. lcr |= UART_LCR_PARITY;
  1129. lcr |= UART_LCR_EPAR;
  1130. lcr &= ~UART_LCR_SPAR;
  1131. }
  1132. }
  1133. char_bits = tty_get_char_size(termios->c_cflag);
  1134. lcr &= ~UART_LCR_WLEN8;
  1135. lcr |= UART_LCR_WLEN(char_bits);
  1136. /* Stop bits */
  1137. if (termios->c_cflag & CSTOPB)
  1138. lcr |= UART_LCR_STOP;
  1139. else
  1140. lcr &= ~UART_LCR_STOP;
  1141. tegra_uart_write(tup, lcr, UART_LCR);
  1142. tup->lcr_shadow = lcr;
  1143. tup->symb_bit = tty_get_frame_size(termios->c_cflag);
  1144. /* Baud rate. */
  1145. baud = uart_get_baud_rate(u, termios, oldtermios,
  1146. parent_clk_rate/max_divider,
  1147. parent_clk_rate/16);
  1148. spin_unlock_irqrestore(&u->lock, flags);
  1149. ret = tegra_set_baudrate(tup, baud);
  1150. if (ret < 0) {
  1151. dev_err(tup->uport.dev, "Failed to set baud rate\n");
  1152. return;
  1153. }
  1154. if (tty_termios_baud_rate(termios))
  1155. tty_termios_encode_baud_rate(termios, baud, baud);
  1156. spin_lock_irqsave(&u->lock, flags);
  1157. /* Flow control */
  1158. if (termios->c_cflag & CRTSCTS) {
  1159. tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
  1160. tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
  1161. tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
  1162. /* If the upper layer has asked for RTS to be active, set it here */
  1163. if (tup->rts_active)
  1164. set_rts(tup, true);
  1165. } else {
  1166. tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
  1167. tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
  1168. tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
  1169. }
  1170. /* update the port timeout based on new settings */
  1171. uart_update_timeout(u, termios->c_cflag, baud);
  1172. /* Make sure all writes have completed */
  1173. tegra_uart_read(tup, UART_IER);
  1174. /* Re-enable interrupt */
  1175. tegra_uart_write(tup, tup->ier_shadow, UART_IER);
  1176. tegra_uart_read(tup, UART_IER);
  1177. tup->uport.ignore_status_mask = 0;
  1178. /* Ignore all characters if CREAD is not set */
  1179. if ((termios->c_cflag & CREAD) == 0)
  1180. tup->uport.ignore_status_mask |= UART_LSR_DR;
  1181. if (termios->c_iflag & IGNBRK)
  1182. tup->uport.ignore_status_mask |= UART_LSR_BI;
  1183. spin_unlock_irqrestore(&u->lock, flags);
  1184. }
  1185. static const char *tegra_uart_type(struct uart_port *u)
  1186. {
  1187. return TEGRA_UART_TYPE;
  1188. }
  1189. static const struct uart_ops tegra_uart_ops = {
  1190. .tx_empty = tegra_uart_tx_empty,
  1191. .set_mctrl = tegra_uart_set_mctrl,
  1192. .get_mctrl = tegra_uart_get_mctrl,
  1193. .stop_tx = tegra_uart_stop_tx,
  1194. .start_tx = tegra_uart_start_tx,
  1195. .stop_rx = tegra_uart_stop_rx,
  1196. .flush_buffer = tegra_uart_flush_buffer,
  1197. .enable_ms = tegra_uart_enable_ms,
  1198. .break_ctl = tegra_uart_break_ctl,
  1199. .startup = tegra_uart_startup,
  1200. .shutdown = tegra_uart_shutdown,
  1201. .set_termios = tegra_uart_set_termios,
  1202. .type = tegra_uart_type,
  1203. .request_port = tegra_uart_request_port,
  1204. .release_port = tegra_uart_release_port,
  1205. };
  1206. static struct uart_driver tegra_uart_driver = {
  1207. .owner = THIS_MODULE,
  1208. .driver_name = "tegra_hsuart",
  1209. .dev_name = "ttyTHS",
  1210. .cons = NULL,
  1211. .nr = TEGRA_UART_MAXIMUM,
  1212. };
  1213. static int tegra_uart_parse_dt(struct platform_device *pdev,
  1214. struct tegra_uart_port *tup)
  1215. {
  1216. struct device_node *np = pdev->dev.of_node;
  1217. int port;
  1218. int ret;
  1219. int index;
  1220. u32 pval;
  1221. int count;
  1222. int n_entries;
  1223. port = of_alias_get_id(np, "serial");
  1224. if (port < 0) {
  1225. dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
  1226. return port;
  1227. }
  1228. tup->uport.line = port;
  1229. tup->enable_modem_interrupt = of_property_read_bool(np,
  1230. "nvidia,enable-modem-interrupt");
  1231. index = of_property_match_string(np, "dma-names", "rx");
  1232. if (index < 0) {
  1233. tup->use_rx_pio = true;
  1234. dev_info(&pdev->dev, "RX in PIO mode\n");
  1235. }
  1236. index = of_property_match_string(np, "dma-names", "tx");
  1237. if (index < 0) {
  1238. tup->use_tx_pio = true;
  1239. dev_info(&pdev->dev, "TX in PIO mode\n");
  1240. }
  1241. n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
  1242. if (n_entries > 0) {
  1243. tup->n_adjustable_baud_rates = n_entries / 3;
  1244. tup->baud_tolerance =
  1245. devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
  1246. sizeof(*tup->baud_tolerance), GFP_KERNEL);
  1247. if (!tup->baud_tolerance)
  1248. return -ENOMEM;
  1249. for (count = 0, index = 0; count < n_entries; count += 3,
  1250. index++) {
  1251. ret =
  1252. of_property_read_u32_index(np,
  1253. "nvidia,adjust-baud-rates",
  1254. count, &pval);
  1255. if (!ret)
  1256. tup->baud_tolerance[index].lower_range_baud =
  1257. pval;
  1258. ret =
  1259. of_property_read_u32_index(np,
  1260. "nvidia,adjust-baud-rates",
  1261. count + 1, &pval);
  1262. if (!ret)
  1263. tup->baud_tolerance[index].upper_range_baud =
  1264. pval;
  1265. ret =
  1266. of_property_read_u32_index(np,
  1267. "nvidia,adjust-baud-rates",
  1268. count + 2, &pval);
  1269. if (!ret)
  1270. tup->baud_tolerance[index].tolerance =
  1271. (s32)pval;
  1272. }
  1273. } else {
  1274. tup->n_adjustable_baud_rates = 0;
  1275. }
  1276. return 0;
  1277. }
  1278. static struct tegra_uart_chip_data tegra20_uart_chip_data = {
  1279. .tx_fifo_full_status = false,
  1280. .allow_txfifo_reset_fifo_mode = true,
  1281. .support_clk_src_div = false,
  1282. .fifo_mode_enable_status = false,
  1283. .uart_max_port = 5,
  1284. .max_dma_burst_bytes = 4,
  1285. .error_tolerance_low_range = -4,
  1286. .error_tolerance_high_range = 4,
  1287. };
  1288. static struct tegra_uart_chip_data tegra30_uart_chip_data = {
  1289. .tx_fifo_full_status = true,
  1290. .allow_txfifo_reset_fifo_mode = false,
  1291. .support_clk_src_div = true,
  1292. .fifo_mode_enable_status = false,
  1293. .uart_max_port = 5,
  1294. .max_dma_burst_bytes = 4,
  1295. .error_tolerance_low_range = -4,
  1296. .error_tolerance_high_range = 4,
  1297. };
  1298. static struct tegra_uart_chip_data tegra186_uart_chip_data = {
  1299. .tx_fifo_full_status = true,
  1300. .allow_txfifo_reset_fifo_mode = false,
  1301. .support_clk_src_div = true,
  1302. .fifo_mode_enable_status = true,
  1303. .uart_max_port = 8,
  1304. .max_dma_burst_bytes = 8,
  1305. .error_tolerance_low_range = 0,
  1306. .error_tolerance_high_range = 4,
  1307. };
  1308. static struct tegra_uart_chip_data tegra194_uart_chip_data = {
  1309. .tx_fifo_full_status = true,
  1310. .allow_txfifo_reset_fifo_mode = false,
  1311. .support_clk_src_div = true,
  1312. .fifo_mode_enable_status = true,
  1313. .uart_max_port = 8,
  1314. .max_dma_burst_bytes = 8,
  1315. .error_tolerance_low_range = -2,
  1316. .error_tolerance_high_range = 2,
  1317. };
  1318. static const struct of_device_id tegra_uart_of_match[] = {
  1319. {
  1320. .compatible = "nvidia,tegra30-hsuart",
  1321. .data = &tegra30_uart_chip_data,
  1322. }, {
  1323. .compatible = "nvidia,tegra20-hsuart",
  1324. .data = &tegra20_uart_chip_data,
  1325. }, {
  1326. .compatible = "nvidia,tegra186-hsuart",
  1327. .data = &tegra186_uart_chip_data,
  1328. }, {
  1329. .compatible = "nvidia,tegra194-hsuart",
  1330. .data = &tegra194_uart_chip_data,
  1331. }, {
  1332. },
  1333. };
  1334. MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
  1335. static int tegra_uart_probe(struct platform_device *pdev)
  1336. {
  1337. struct tegra_uart_port *tup;
  1338. struct uart_port *u;
  1339. struct resource *resource;
  1340. int ret;
  1341. const struct tegra_uart_chip_data *cdata;
  1342. cdata = of_device_get_match_data(&pdev->dev);
  1343. if (!cdata) {
  1344. dev_err(&pdev->dev, "Error: No device match found\n");
  1345. return -ENODEV;
  1346. }
  1347. tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
  1348. if (!tup) {
  1349. dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
  1350. return -ENOMEM;
  1351. }
  1352. ret = tegra_uart_parse_dt(pdev, tup);
  1353. if (ret < 0)
  1354. return ret;
  1355. u = &tup->uport;
  1356. u->dev = &pdev->dev;
  1357. u->ops = &tegra_uart_ops;
  1358. u->type = PORT_TEGRA;
  1359. u->fifosize = 32;
  1360. tup->cdata = cdata;
  1361. platform_set_drvdata(pdev, tup);
  1362. resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1363. if (!resource) {
  1364. dev_err(&pdev->dev, "No IO memory resource\n");
  1365. return -ENODEV;
  1366. }
  1367. u->mapbase = resource->start;
  1368. u->membase = devm_ioremap_resource(&pdev->dev, resource);
  1369. if (IS_ERR(u->membase))
  1370. return PTR_ERR(u->membase);
  1371. tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
  1372. if (IS_ERR(tup->uart_clk)) {
  1373. dev_err(&pdev->dev, "Couldn't get the clock\n");
  1374. return PTR_ERR(tup->uart_clk);
  1375. }
  1376. tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
  1377. if (IS_ERR(tup->rst)) {
  1378. dev_err(&pdev->dev, "Couldn't get the reset\n");
  1379. return PTR_ERR(tup->rst);
  1380. }
  1381. u->iotype = UPIO_MEM32;
  1382. ret = platform_get_irq(pdev, 0);
  1383. if (ret < 0)
  1384. return ret;
  1385. u->irq = ret;
  1386. u->regshift = 2;
  1387. ret = uart_add_one_port(&tegra_uart_driver, u);
  1388. if (ret < 0) {
  1389. dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
  1390. return ret;
  1391. }
  1392. return ret;
  1393. }
  1394. static int tegra_uart_remove(struct platform_device *pdev)
  1395. {
  1396. struct tegra_uart_port *tup = platform_get_drvdata(pdev);
  1397. struct uart_port *u = &tup->uport;
  1398. uart_remove_one_port(&tegra_uart_driver, u);
  1399. return 0;
  1400. }
  1401. #ifdef CONFIG_PM_SLEEP
  1402. static int tegra_uart_suspend(struct device *dev)
  1403. {
  1404. struct tegra_uart_port *tup = dev_get_drvdata(dev);
  1405. struct uart_port *u = &tup->uport;
  1406. return uart_suspend_port(&tegra_uart_driver, u);
  1407. }
  1408. static int tegra_uart_resume(struct device *dev)
  1409. {
  1410. struct tegra_uart_port *tup = dev_get_drvdata(dev);
  1411. struct uart_port *u = &tup->uport;
  1412. return uart_resume_port(&tegra_uart_driver, u);
  1413. }
  1414. #endif
  1415. static const struct dev_pm_ops tegra_uart_pm_ops = {
  1416. SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
  1417. };
  1418. static struct platform_driver tegra_uart_platform_driver = {
  1419. .probe = tegra_uart_probe,
  1420. .remove = tegra_uart_remove,
  1421. .driver = {
  1422. .name = "serial-tegra",
  1423. .of_match_table = tegra_uart_of_match,
  1424. .pm = &tegra_uart_pm_ops,
  1425. },
  1426. };
  1427. static int __init tegra_uart_init(void)
  1428. {
  1429. int ret;
  1430. struct device_node *node;
  1431. const struct of_device_id *match = NULL;
  1432. const struct tegra_uart_chip_data *cdata = NULL;
  1433. node = of_find_matching_node(NULL, tegra_uart_of_match);
  1434. if (node)
  1435. match = of_match_node(tegra_uart_of_match, node);
  1436. of_node_put(node);
  1437. if (match)
  1438. cdata = match->data;
  1439. if (cdata)
  1440. tegra_uart_driver.nr = cdata->uart_max_port;
  1441. ret = uart_register_driver(&tegra_uart_driver);
  1442. if (ret < 0) {
  1443. pr_err("Could not register %s driver\n",
  1444. tegra_uart_driver.driver_name);
  1445. return ret;
  1446. }
  1447. ret = platform_driver_register(&tegra_uart_platform_driver);
  1448. if (ret < 0) {
  1449. pr_err("Uart platform driver register failed, e = %d\n", ret);
  1450. uart_unregister_driver(&tegra_uart_driver);
  1451. return ret;
  1452. }
  1453. return 0;
  1454. }
  1455. static void __exit tegra_uart_exit(void)
  1456. {
  1457. pr_info("Unloading tegra uart driver\n");
  1458. platform_driver_unregister(&tegra_uart_platform_driver);
  1459. uart_unregister_driver(&tegra_uart_driver);
  1460. }
  1461. module_init(tegra_uart_init);
  1462. module_exit(tegra_uart_exit);
  1463. MODULE_ALIAS("platform:serial-tegra");
  1464. MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
  1465. MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
  1466. MODULE_LICENSE("GPL v2");