mvebu-uart.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * ***************************************************************************
  4. * Marvell Armada-3700 Serial Driver
  5. * Author: Wilson Ding <[email protected]>
  6. * Copyright (C) 2015 Marvell International Ltd.
  7. * ***************************************************************************
  8. */
  9. #include <linux/clk.h>
  10. #include <linux/clk-provider.h>
  11. #include <linux/console.h>
  12. #include <linux/delay.h>
  13. #include <linux/device.h>
  14. #include <linux/init.h>
  15. #include <linux/io.h>
  16. #include <linux/iopoll.h>
  17. #include <linux/math64.h>
  18. #include <linux/of.h>
  19. #include <linux/of_address.h>
  20. #include <linux/of_device.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_platform.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/serial.h>
  25. #include <linux/serial_core.h>
  26. #include <linux/slab.h>
  27. #include <linux/tty.h>
  28. #include <linux/tty_flip.h>
/* Register map: "STD" offsets apply to UART1, "EXT" offsets to UART2 */
#define UART_STD_RBR		0x00	/* receive buffer */
#define UART_EXT_RBR		0x18

#define UART_STD_TSH		0x04	/* transmit holding */
#define UART_EXT_TSH		0x1C

#define UART_STD_CTRL1		0x08
#define UART_EXT_CTRL1		0x04
#define CTRL_SOFT_RST		BIT(31)
#define CTRL_TXFIFO_RST		BIT(15)
#define CTRL_RXFIFO_RST		BIT(14)
#define CTRL_SND_BRK_SEQ	BIT(11)	/* send break sequence */
#define CTRL_BRK_DET_INT	BIT(3)	/* break-detect interrupt enable */
#define CTRL_FRM_ERR_INT	BIT(2)	/* framing-error interrupt enable */
#define CTRL_PAR_ERR_INT	BIT(1)	/* parity-error interrupt enable */
#define CTRL_OVR_ERR_INT	BIT(0)	/* overrun-error interrupt enable */
/* All break/error interrupt enables combined */
#define CTRL_BRK_INT		(CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
				 CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)

/* On UART1 the RX/TX-ready enables live in CTRL1; UART2 has a separate reg */
#define UART_STD_CTRL2		UART_STD_CTRL1
#define UART_EXT_CTRL2		0x20
#define CTRL_STD_TX_RDY_INT	BIT(5)
#define CTRL_EXT_TX_RDY_INT	BIT(6)
#define CTRL_STD_RX_RDY_INT	BIT(4)
#define CTRL_EXT_RX_RDY_INT	BIT(5)

#define UART_STAT		0x0C	/* status register (same offset on both UARTs) */
#define STAT_TX_FIFO_EMP	BIT(13)
#define STAT_TX_FIFO_FUL	BIT(11)
#define STAT_TX_EMP		BIT(6)
#define STAT_STD_TX_RDY		BIT(5)
#define STAT_EXT_TX_RDY		BIT(15)
#define STAT_STD_RX_RDY		BIT(4)
#define STAT_EXT_RX_RDY		BIT(14)
#define STAT_BRK_DET		BIT(3)
#define STAT_FRM_ERR		BIT(2)
#define STAT_PAR_ERR		BIT(1)
#define STAT_OVR_ERR		BIT(0)
#define STAT_BRK_ERR		(STAT_BRK_DET | STAT_FRM_ERR \
				 | STAT_PAR_ERR | STAT_OVR_ERR)

/*
 * Marvell Armada 3700 Functional Specifications describes that bit 21 of UART
 * Clock Control register controls UART1 and bit 20 controls UART2. But in
 * reality bit 21 controls UART2 and bit 20 controls UART1. This seems to be an
 * error in Marvell's documentation. Hence following CLK_DIS macros are swapped.
 */
#define UART_BRDV		0x10
/* These bits are located in UART1 address space and control UART2 */
#define UART2_CLK_DIS		BIT(21)
/* These bits are located in UART1 address space and control UART1 */
#define UART1_CLK_DIS		BIT(20)
/* These bits are located in UART1 address space and control both UARTs */
#define CLK_NO_XTAL		BIT(19)
#define CLK_TBG_DIV1_SHIFT	15
#define CLK_TBG_DIV1_MASK	0x7
#define CLK_TBG_DIV1_MAX	6
#define CLK_TBG_DIV2_SHIFT	12
#define CLK_TBG_DIV2_MASK	0x7
#define CLK_TBG_DIV2_MAX	6
#define CLK_TBG_SEL_SHIFT	10
#define CLK_TBG_SEL_MASK	0x3
/* These bits are located in both UARTs address space */
#define BRDV_BAUD_MASK		0x3FF	/* "D" baud-generator divisor field */
#define BRDV_BAUD_MAX		BRDV_BAUD_MASK

#define UART_OSAMP		0x14	/* oversampling ("M" fractional divisors) */
#define OSAMP_DEFAULT_DIVISOR	16	/* default x16 oversampling scheme */
#define OSAMP_DIVISORS_MASK	0x3F3F3F3F	/* m1..m4 fields, 6 bits each */
#define OSAMP_MAX_DIVISOR	63

#define MVEBU_NR_UARTS		2

#define MVEBU_UART_TYPE		"mvebu-uart"
#define DRIVER_NAME		"mvebu_serial"
enum {
	/* Either there is only one summed IRQ... */
	UART_IRQ_SUM = 0,
	/* ...or there are two separate IRQ for RX and TX */
	UART_RX_IRQ = 0,
	UART_TX_IRQ,
	UART_IRQ_COUNT
};

/* Diverging register offsets between the two UART variants */
struct uart_regs_layout {
	unsigned int rbr;	/* receive buffer */
	unsigned int tsh;	/* transmit holding */
	unsigned int ctrl;	/* control (break/error interrupt enables) */
	unsigned int intr;	/* RX/TX-ready interrupt enables */
};

/* Diverging status/interrupt-enable flag bits between the two variants */
struct uart_flags {
	unsigned int ctrl_tx_rdy_int;
	unsigned int ctrl_rx_rdy_int;
	unsigned int stat_tx_rdy;
	unsigned int stat_rx_rdy;
};

/* Driver data, a structure for each UART port */
struct mvebu_uart_driver_data {
	bool is_ext;		/* true for the extended (UART2) layout */
	struct uart_regs_layout regs;
	struct uart_flags flags;
};

/* Saved registers during suspend */
struct mvebu_uart_pm_regs {
	unsigned int rbr;
	unsigned int tsh;
	unsigned int ctrl;
	unsigned int intr;
	unsigned int stat;
	unsigned int brdv;
	unsigned int osamp;
};

/* MVEBU UART driver structure */
struct mvebu_uart {
	struct uart_port *port;
	struct clk *clk;
	int irq[UART_IRQ_COUNT];
	struct mvebu_uart_driver_data *data;
#if defined(CONFIG_PM)
	struct mvebu_uart_pm_regs pm_regs;
#endif /* CONFIG_PM */
};

/* Retrieve the per-port driver state stashed in uart_port::private_data */
static struct mvebu_uart *to_mvuart(struct uart_port *port)
{
	return (struct mvebu_uart *)port->private_data;
}

/* Per-port accessors hiding the diverging offsets/flags of the two variants */
#define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext)

#define UART_RBR(port) (to_mvuart(port)->data->regs.rbr)
#define UART_TSH(port) (to_mvuart(port)->data->regs.tsh)
#define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl)
#define UART_INTR(port) (to_mvuart(port)->data->regs.intr)

#define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int)
#define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int)
#define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy)
#define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy)

static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];

/* Serializes access to UART_BRDV, which is shared between both UARTs */
static DEFINE_SPINLOCK(mvebu_uart_lock);
  160. /* Core UART Driver Operations */
  161. static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
  162. {
  163. unsigned long flags;
  164. unsigned int st;
  165. spin_lock_irqsave(&port->lock, flags);
  166. st = readl(port->membase + UART_STAT);
  167. spin_unlock_irqrestore(&port->lock, flags);
  168. return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
  169. }
/* Modem lines are not wired up; report CTS/DSR/CD as permanently asserted */
static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
{
	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}

static void mvebu_uart_set_mctrl(struct uart_port *port,
				 unsigned int mctrl)
{
	/*
	 * Even if we do not support configuring the modem control lines, this
	 * function must be provided to the serial core
	 */
}
  182. static void mvebu_uart_stop_tx(struct uart_port *port)
  183. {
  184. unsigned int ctl = readl(port->membase + UART_INTR(port));
  185. ctl &= ~CTRL_TX_RDY_INT(port);
  186. writel(ctl, port->membase + UART_INTR(port));
  187. }
  188. static void mvebu_uart_start_tx(struct uart_port *port)
  189. {
  190. unsigned int ctl;
  191. struct circ_buf *xmit = &port->state->xmit;
  192. if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) {
  193. writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
  194. xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
  195. port->icount.tx++;
  196. }
  197. ctl = readl(port->membase + UART_INTR(port));
  198. ctl |= CTRL_TX_RDY_INT(port);
  199. writel(ctl, port->membase + UART_INTR(port));
  200. }
  201. static void mvebu_uart_stop_rx(struct uart_port *port)
  202. {
  203. unsigned int ctl;
  204. ctl = readl(port->membase + UART_CTRL(port));
  205. ctl &= ~CTRL_BRK_INT;
  206. writel(ctl, port->membase + UART_CTRL(port));
  207. ctl = readl(port->membase + UART_INTR(port));
  208. ctl &= ~CTRL_RX_RDY_INT(port);
  209. writel(ctl, port->membase + UART_INTR(port));
  210. }
  211. static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
  212. {
  213. unsigned int ctl;
  214. unsigned long flags;
  215. spin_lock_irqsave(&port->lock, flags);
  216. ctl = readl(port->membase + UART_CTRL(port));
  217. if (brk == -1)
  218. ctl |= CTRL_SND_BRK_SEQ;
  219. else
  220. ctl &= ~CTRL_SND_BRK_SEQ;
  221. writel(ctl, port->membase + UART_CTRL(port));
  222. spin_unlock_irqrestore(&port->lock, flags);
  223. }
/*
 * Drain received characters into the TTY layer, updating the error counters
 * and per-character flags from the status bits.  Runs until neither RX-ready
 * nor break-detect is pending.  @status is the STAT value the caller read.
 */
static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
{
	struct tty_port *tport = &port->state->port;
	unsigned char ch = 0;
	char flag = 0;
	int ret;

	do {
		if (status & STAT_RX_RDY(port)) {
			/* Only the low byte of RBR is character data */
			ch = readl(port->membase + UART_RBR(port));
			ch &= 0xff;
			flag = TTY_NORMAL;
			port->icount.rx++;

			if (status & STAT_PAR_ERR)
				port->icount.parity++;
		}

		/*
		 * For UART2, error bits are not cleared on buffer read.
		 * This causes interrupt loop and system hang.
		 */
		if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
			/* Write the error bits back to acknowledge them */
			ret = readl(port->membase + UART_STAT);
			ret |= STAT_BRK_ERR;
			writel(ret, port->membase + UART_STAT);
		}

		if (status & STAT_BRK_DET) {
			port->icount.brk++;
			/*
			 * Suppress frame/parity bits while handling a break —
			 * presumably they accompany the break condition and
			 * would otherwise be double-counted below.
			 */
			status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
			if (uart_handle_break(port))
				goto ignore_char;
		}

		if (status & STAT_OVR_ERR)
			port->icount.overrun++;

		if (status & STAT_FRM_ERR)
			port->icount.frame++;

		if (uart_handle_sysrq_char(port, ch))
			goto ignore_char;

		/* Drop the character entirely when parity errors are ignored */
		if (status & port->ignore_status_mask & STAT_PAR_ERR)
			status &= ~STAT_RX_RDY(port);

		status &= port->read_status_mask;

		if (status & STAT_PAR_ERR)
			flag = TTY_PARITY;

		status &= ~port->ignore_status_mask;

		if (status & STAT_RX_RDY(port))
			tty_insert_flip_char(tport, ch, flag);

		if (status & STAT_BRK_DET)
			tty_insert_flip_char(tport, 0, TTY_BREAK);

		if (status & STAT_FRM_ERR)
			tty_insert_flip_char(tport, 0, TTY_FRAME);

		if (status & STAT_OVR_ERR)
			tty_insert_flip_char(tport, 0, TTY_OVERRUN);

ignore_char:
		status = readl(port->membase + UART_STAT);
	} while (status & (STAT_RX_RDY(port) | STAT_BRK_DET));

	tty_flip_buffer_push(tport);
}
/*
 * Push characters from the circular transmit buffer into the TX FIFO.
 * Called from IRQ context when the transmitter reports ready.
 */
static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int count;
	unsigned int st;

	/* A pending xon/xoff character takes priority over buffer data */
	if (port->x_char) {
		writel(port->x_char, port->membase + UART_TSH(port));
		port->icount.tx++;
		port->x_char = 0;
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		mvebu_uart_stop_tx(port);
		return;
	}

	/* Send at most one FIFO's worth, stopping early if the FIFO fills */
	for (count = 0; count < port->fifosize; count++) {
		writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;

		if (uart_circ_empty(xmit))
			break;

		st = readl(port->membase + UART_STAT);
		if (st & STAT_TX_FIFO_FUL)
			break;
	}

	/* Wake writers once the buffer has drained below the threshold */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		mvebu_uart_stop_tx(port);
}
  309. static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
  310. {
  311. struct uart_port *port = (struct uart_port *)dev_id;
  312. unsigned int st = readl(port->membase + UART_STAT);
  313. if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
  314. STAT_BRK_DET))
  315. mvebu_uart_rx_chars(port, st);
  316. if (st & STAT_TX_RDY(port))
  317. mvebu_uart_tx_chars(port, st);
  318. return IRQ_HANDLED;
  319. }
  320. static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id)
  321. {
  322. struct uart_port *port = (struct uart_port *)dev_id;
  323. unsigned int st = readl(port->membase + UART_STAT);
  324. if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
  325. STAT_BRK_DET))
  326. mvebu_uart_rx_chars(port, st);
  327. return IRQ_HANDLED;
  328. }
  329. static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id)
  330. {
  331. struct uart_port *port = (struct uart_port *)dev_id;
  332. unsigned int st = readl(port->membase + UART_STAT);
  333. if (st & STAT_TX_RDY(port))
  334. mvebu_uart_tx_chars(port, st);
  335. return IRQ_HANDLED;
  336. }
/*
 * uart_ops::startup - bring the port up: reset the FIFOs, clear stale error
 * status, unmask RX interrupts and request the IRQ(s) matching the DT
 * binding in use.  Returns 0 on success or a negative errno from
 * devm_request_irq().
 */
static int mvebu_uart_startup(struct uart_port *port)
{
	struct mvebu_uart *mvuart = to_mvuart(port);
	unsigned int ctl;
	int ret;

	writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
	       port->membase + UART_CTRL(port));
	udelay(1);

	/* Clear the error bits of state register before IRQ request */
	ret = readl(port->membase + UART_STAT);
	ret |= STAT_BRK_ERR;
	writel(ret, port->membase + UART_STAT);

	/* Enable break/error interrupts, then the RX-ready interrupt */
	writel(CTRL_BRK_INT, port->membase + UART_CTRL(port));

	ctl = readl(port->membase + UART_INTR(port));
	ctl |= CTRL_RX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));

	if (!mvuart->irq[UART_TX_IRQ]) {
		/* Old bindings with just one interrupt (UART0 only) */
		ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM],
				       mvebu_uart_isr, port->irqflags,
				       dev_name(port->dev), port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_IRQ_SUM]);
			return ret;
		}
	} else {
		/* New bindings with an IRQ for RX and TX (both UART) */
		ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ],
				       mvebu_uart_rx_isr, port->irqflags,
				       dev_name(port->dev), port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_RX_IRQ]);
			return ret;
		}

		ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ],
				       mvebu_uart_tx_isr, port->irqflags,
				       dev_name(port->dev),
				       port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_TX_IRQ]);
			/* Release the RX IRQ so shutdown stays balanced */
			devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ],
				      port);
			return ret;
		}
	}

	return 0;
}
  387. static void mvebu_uart_shutdown(struct uart_port *port)
  388. {
  389. struct mvebu_uart *mvuart = to_mvuart(port);
  390. writel(0, port->membase + UART_INTR(port));
  391. if (!mvuart->irq[UART_TX_IRQ]) {
  392. devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port);
  393. } else {
  394. devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port);
  395. devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port);
  396. }
  397. }
/*
 * Program the divisors for @baud and return the baudrate actually achieved,
 * or 0 when port->uartclk is unknown and nothing can be programmed.
 */
static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
	unsigned int d_divisor, m_divisor;
	unsigned long flags;
	u32 brdv, osamp;

	if (!port->uartclk)
		return 0;

	/*
	 * The baudrate is derived from the UART clock thanks to divisors:
	 *   > d1 * d2 ("TBG divisors"): can divide only TBG clock from 1 to 6
	 *   > D ("baud generator"): can divide the clock from 1 to 1023
	 *   > M ("fractional divisor"): allows a better accuracy (from 1 to 63)
	 *
	 * Exact formulas for calculating baudrate:
	 *
	 * with default x16 scheme:
	 *   baudrate = xtal / (d * 16)
	 *   baudrate = tbg / (d1 * d2 * d * 16)
	 *
	 * with fractional divisor:
	 *   baudrate = 10 * xtal / (d * (3 * (m1 + m2) + 2 * (m3 + m4)))
	 *   baudrate = 10 * tbg / (d1*d2 * d * (3 * (m1 + m2) + 2 * (m3 + m4)))
	 *
	 * Oversampling value:
	 *   osamp = (m1 << 0) | (m2 << 8) | (m3 << 16) | (m4 << 24);
	 *
	 * Where m1 controls number of clock cycles per bit for bits 1,2,3;
	 * m2 for bits 4,5,6; m3 for bits 7,8 and m4 for bits 9,10.
	 *
	 * To simplify baudrate setup set all the M prescalers to the same
	 * value. For baudrates 9600 Bd and higher, it is enough to use the
	 * default (x16) divisor or fractional divisor with M = 63, so there
	 * is no need to use real fractional support (where the M prescalers
	 * are not equal).
	 *
	 * When all the M prescalers are zeroed then default (x16) divisor is
	 * used. Default x16 scheme is more stable than M (fractional divisor),
	 * so use M only when D divisor is not enough to derive baudrate.
	 *
	 * Member port->uartclk is either xtal clock rate or TBG clock rate
	 * divided by (d1 * d2). So d1 and d2 are already set by the UART clock
	 * driver (and UART driver itself cannot change them). Moreover they are
	 * shared between both UARTs.
	 */
	m_divisor = OSAMP_DEFAULT_DIVISOR;
	d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);

	if (d_divisor > BRDV_BAUD_MAX) {
		/*
		 * Experiments show that small M divisors are unstable.
		 * Use maximal possible M = 63 and calculate D divisor.
		 */
		m_divisor = OSAMP_MAX_DIVISOR;
		d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
	}

	/* Clamp D to the valid hardware range 1..BRDV_BAUD_MAX */
	if (d_divisor < 1)
		d_divisor = 1;
	else if (d_divisor > BRDV_BAUD_MAX)
		d_divisor = BRDV_BAUD_MAX;

	/* BRDV is shared between both UARTs; serialize the read-modify-write */
	spin_lock_irqsave(&mvebu_uart_lock, flags);
	brdv = readl(port->membase + UART_BRDV);
	brdv &= ~BRDV_BAUD_MASK;
	brdv |= d_divisor;
	writel(brdv, port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	osamp = readl(port->membase + UART_OSAMP);
	osamp &= ~OSAMP_DIVISORS_MASK;
	/* Leaving the M fields zeroed selects the default x16 scheme */
	if (m_divisor != OSAMP_DEFAULT_DIVISOR)
		osamp |= (m_divisor << 0) | (m_divisor << 8) |
			 (m_divisor << 16) | (m_divisor << 24);
	writel(osamp, port->membase + UART_OSAMP);

	return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
}
/*
 * uart_ops::set_termios - apply new line settings.  Only INPCK/IGNPAR and
 * CREAD/CBAUD changes are honoured; the character size is forced to CS8.
 */
static void mvebu_uart_set_termios(struct uart_port *port,
				   struct ktermios *termios,
				   const struct ktermios *old)
{
	unsigned long flags;
	unsigned int baud, min_baud, max_baud;

	spin_lock_irqsave(&port->lock, flags);

	port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
		STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;

	/* Report frame/parity errors only when input checking is requested */
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;

	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |=
			STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;

	/* With the receiver disabled, ignore all RX data and errors */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;

	/*
	 * Maximal divisor is 1023 and maximal fractional divisor is 63. And
	 * experiments show that baudrates above 1/80 of parent clock rate are
	 * not stable. So disallow baudrates above 1/80 of the parent clock
	 * rate. If port->uartclk is not available, then
	 * mvebu_uart_baud_rate_set() fails, so values min_baud and max_baud
	 * in this case do not matter.
	 */
	min_baud = DIV_ROUND_UP(port->uartclk, BRDV_BAUD_MAX *
				OSAMP_MAX_DIVISOR);
	max_baud = port->uartclk / 80;

	baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
	baud = mvebu_uart_baud_rate_set(port, baud);

	/* In case baudrate cannot be changed, report previous old value */
	if (baud == 0 && old)
		baud = tty_termios_baud_rate(old);

	/* Only the following flag changes are supported */
	if (old) {
		termios->c_iflag &= INPCK | IGNPAR;
		termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
		termios->c_cflag &= CREAD | CBAUD;
		termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
		termios->c_cflag |= CS8;
	}

	if (baud != 0) {
		tty_termios_encode_baud_rate(termios, baud, baud);
		uart_update_timeout(port, termios->c_cflag, baud);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
/* uart_ops::type - port type string reported to user space */
static const char *mvebu_uart_type(struct uart_port *port)
{
	return MVEBU_UART_TYPE;
}

/* uart_ops::release_port - nothing extra to release */
static void mvebu_uart_release_port(struct uart_port *port)
{
	/* Nothing to do here */
}

/* uart_ops::request_port - nothing extra to request; always succeeds */
static int mvebu_uart_request_port(struct uart_port *port)
{
	return 0;
}
#ifdef CONFIG_CONSOLE_POLL
/* Polled (kgdb-style) read: one character, or NO_POLL_CHAR if RX is empty */
static int mvebu_uart_get_poll_char(struct uart_port *port)
{
	unsigned int st = readl(port->membase + UART_STAT);

	if (!(st & STAT_RX_RDY(port)))
		return NO_POLL_CHAR;

	return readl(port->membase + UART_RBR(port));
}

/* Polled write: busy-wait for TX FIFO space, then emit @c */
static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
{
	unsigned int st;

	for (;;) {
		st = readl(port->membase + UART_STAT);

		if (!(st & STAT_TX_FIFO_FUL))
			break;

		udelay(1);
	}

	writel(c, port->membase + UART_TSH(port));
}
#endif
/* uart_ops hooks wiring the serial core to this hardware */
static const struct uart_ops mvebu_uart_ops = {
	.tx_empty	= mvebu_uart_tx_empty,
	.set_mctrl	= mvebu_uart_set_mctrl,
	.get_mctrl	= mvebu_uart_get_mctrl,
	.stop_tx	= mvebu_uart_stop_tx,
	.start_tx	= mvebu_uart_start_tx,
	.stop_rx	= mvebu_uart_stop_rx,
	.break_ctl	= mvebu_uart_break_ctl,
	.startup	= mvebu_uart_startup,
	.shutdown	= mvebu_uart_shutdown,
	.set_termios	= mvebu_uart_set_termios,
	.type		= mvebu_uart_type,
	.release_port	= mvebu_uart_release_port,
	.request_port	= mvebu_uart_request_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= mvebu_uart_get_poll_char,
	.poll_put_char	= mvebu_uart_put_poll_char,
#endif
};
  568. /* Console Driver Operations */
  569. #ifdef CONFIG_SERIAL_MVEBU_CONSOLE
  570. /* Early Console */
  571. static void mvebu_uart_putc(struct uart_port *port, unsigned char c)
  572. {
  573. unsigned int st;
  574. for (;;) {
  575. st = readl(port->membase + UART_STAT);
  576. if (!(st & STAT_TX_FIFO_FUL))
  577. break;
  578. }
  579. /* At early stage, DT is not parsed yet, only use UART0 */
  580. writel(c, port->membase + UART_STD_TSH);
  581. for (;;) {
  582. st = readl(port->membase + UART_STAT);
  583. if (st & STAT_TX_FIFO_EMP)
  584. break;
  585. }
  586. }
/* Early-console write callback: emit @s through the polled putc helper */
static void mvebu_uart_putc_early_write(struct console *con,
					const char *s,
					unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, mvebu_uart_putc);
}

/*
 * Earlycon setup: the earlycon core has already mapped the registers;
 * fail if the mapping is missing, otherwise install our write hook.
 */
static int __init
mvebu_uart_early_console_setup(struct earlycon_device *device,
			       const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = mvebu_uart_putc_early_write;

	return 0;
}

EARLYCON_DECLARE(ar3700_uart, mvebu_uart_early_console_setup);
OF_EARLYCON_DECLARE(ar3700_uart, "marvell,armada-3700-uart",
		    mvebu_uart_early_console_setup);
/* Poll (up to 10 ms) for the transmitter to accept another character */
static void wait_for_xmitr(struct uart_port *port)
{
	u32 val;

	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
				  (val & STAT_TX_RDY(port)), 1, 10000);
}

/* Poll (up to 10 ms) for the transmitter to become completely empty */
static void wait_for_xmite(struct uart_port *port)
{
	u32 val;

	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
				  (val & STAT_TX_EMP), 1, 10000);
}

/* Console putchar callback: wait for TX-ready, then write the character */
static void mvebu_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
	wait_for_xmitr(port);
	writel(ch, port->membase + UART_TSH(port));
}
/*
 * Console write: mask break/error and RX/TX interrupts, emit the string by
 * polling, wait for the transmitter to drain, then restore whichever enables
 * were set.  Uses a trylock during an oops to avoid deadlocking on port->lock.
 */
static void mvebu_uart_console_write(struct console *co, const char *s,
				     unsigned int count)
{
	struct uart_port *port = &mvebu_uart_ports[co->index];
	unsigned long flags;
	unsigned int ier, intr, ctl;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* Save the currently enabled interrupt bits before masking them */
	ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
	intr = readl(port->membase + UART_INTR(port)) &
	       (CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port));
	writel(0, port->membase + UART_CTRL(port));
	writel(0, port->membase + UART_INTR(port));

	uart_console_write(port, s, count, mvebu_uart_console_putchar);

	wait_for_xmite(port);

	if (ier)
		writel(ier, port->membase + UART_CTRL(port));

	if (intr) {
		ctl = intr | readl(port->membase + UART_INTR(port));
		writel(ctl, port->membase + UART_INTR(port));
	}

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}
  650. static int mvebu_uart_console_setup(struct console *co, char *options)
  651. {
  652. struct uart_port *port;
  653. int baud = 9600;
  654. int bits = 8;
  655. int parity = 'n';
  656. int flow = 'n';
  657. if (co->index < 0 || co->index >= MVEBU_NR_UARTS)
  658. return -EINVAL;
  659. port = &mvebu_uart_ports[co->index];
  660. if (!port->mapbase || !port->membase) {
  661. pr_debug("console on ttyMV%i not present\n", co->index);
  662. return -ENODEV;
  663. }
  664. if (options)
  665. uart_parse_options(options, &baud, &parity, &bits, &flow);
  666. return uart_set_options(port, co, baud, parity, bits, flow);
  667. }
static struct uart_driver mvebu_uart_driver;

/* Console bound to the "ttyMV" ports; registered early via console_initcall */
static struct console mvebu_uart_console = {
	.name	= "ttyMV",
	.write	= mvebu_uart_console_write,
	.device	= uart_console_device,
	.setup	= mvebu_uart_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mvebu_uart_driver,
};

static int __init mvebu_uart_console_init(void)
{
	register_console(&mvebu_uart_console);
	return 0;
}

console_initcall(mvebu_uart_console_init);
  684. #endif /* CONFIG_SERIAL_MVEBU_CONSOLE */
/* The uart_driver registered with the serial core; device name "ttyMV" */
static struct uart_driver mvebu_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= DRIVER_NAME,
	.dev_name	= "ttyMV",
	.nr		= MVEBU_NR_UARTS,
#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
	.cons		= &mvebu_uart_console,
#endif
};
  694. #if defined(CONFIG_PM)
/*
 * Suspend: park the port via the serial core, save every UART register for
 * restoration on resume, and mark the device wakeup-enabled.  The shared
 * BRDV register is read under the global lock.
 */
static int mvebu_uart_suspend(struct device *dev)
{
	struct mvebu_uart *mvuart = dev_get_drvdata(dev);
	struct uart_port *port = mvuart->port;
	unsigned long flags;

	uart_suspend_port(&mvebu_uart_driver, port);

	mvuart->pm_regs.rbr = readl(port->membase + UART_RBR(port));
	mvuart->pm_regs.tsh = readl(port->membase + UART_TSH(port));
	mvuart->pm_regs.ctrl = readl(port->membase + UART_CTRL(port));
	mvuart->pm_regs.intr = readl(port->membase + UART_INTR(port));
	mvuart->pm_regs.stat = readl(port->membase + UART_STAT);
	spin_lock_irqsave(&mvebu_uart_lock, flags);
	mvuart->pm_regs.brdv = readl(port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
	mvuart->pm_regs.osamp = readl(port->membase + UART_OSAMP);

	device_set_wakeup_enable(dev, true);

	return 0;
}
/*
 * Resume: write back the registers saved by mvebu_uart_suspend() in the
 * same order, then let the serial core restart the port.
 */
static int mvebu_uart_resume(struct device *dev)
{
	struct mvebu_uart *mvuart = dev_get_drvdata(dev);
	struct uart_port *port = mvuart->port;
	unsigned long flags;

	writel(mvuart->pm_regs.rbr, port->membase + UART_RBR(port));
	writel(mvuart->pm_regs.tsh, port->membase + UART_TSH(port));
	writel(mvuart->pm_regs.ctrl, port->membase + UART_CTRL(port));
	writel(mvuart->pm_regs.intr, port->membase + UART_INTR(port));
	writel(mvuart->pm_regs.stat, port->membase + UART_STAT);

	/* UART_BRDV is shared with the UART clock driver, so take the lock */
	spin_lock_irqsave(&mvebu_uart_lock, flags);
	writel(mvuart->pm_regs.brdv, port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	writel(mvuart->pm_regs.osamp, port->membase + UART_OSAMP);

	uart_resume_port(&mvebu_uart_driver, port);

	return 0;
}
/* System sleep hooks; only built when CONFIG_PM is enabled. */
static const struct dev_pm_ops mvebu_uart_pm_ops = {
	.suspend = mvebu_uart_suspend,
	.resume = mvebu_uart_resume,
};
  734. #endif /* CONFIG_PM */
/* Forward declaration: the match table is defined after the probe function. */
static const struct of_device_id mvebu_uart_of_match[];

/* Counter to keep track of each UART port id when not using CONFIG_OF */
static int uart_num_counter;
  738. static int mvebu_uart_probe(struct platform_device *pdev)
  739. {
  740. struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  741. const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
  742. &pdev->dev);
  743. struct uart_port *port;
  744. struct mvebu_uart *mvuart;
  745. int id, irq;
  746. if (!reg) {
  747. dev_err(&pdev->dev, "no registers defined\n");
  748. return -EINVAL;
  749. }
  750. /* Assume that all UART ports have a DT alias or none has */
  751. id = of_alias_get_id(pdev->dev.of_node, "serial");
  752. if (!pdev->dev.of_node || id < 0)
  753. pdev->id = uart_num_counter++;
  754. else
  755. pdev->id = id;
  756. if (pdev->id >= MVEBU_NR_UARTS) {
  757. dev_err(&pdev->dev, "cannot have more than %d UART ports\n",
  758. MVEBU_NR_UARTS);
  759. return -EINVAL;
  760. }
  761. port = &mvebu_uart_ports[pdev->id];
  762. spin_lock_init(&port->lock);
  763. port->dev = &pdev->dev;
  764. port->type = PORT_MVEBU;
  765. port->ops = &mvebu_uart_ops;
  766. port->regshift = 0;
  767. port->fifosize = 32;
  768. port->iotype = UPIO_MEM32;
  769. port->flags = UPF_FIXED_PORT;
  770. port->line = pdev->id;
  771. /*
  772. * IRQ number is not stored in this structure because we may have two of
  773. * them per port (RX and TX). Instead, use the driver UART structure
  774. * array so called ->irq[].
  775. */
  776. port->irq = 0;
  777. port->irqflags = 0;
  778. port->mapbase = reg->start;
  779. port->membase = devm_ioremap_resource(&pdev->dev, reg);
  780. if (IS_ERR(port->membase))
  781. return PTR_ERR(port->membase);
  782. mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
  783. GFP_KERNEL);
  784. if (!mvuart)
  785. return -ENOMEM;
  786. /* Get controller data depending on the compatible string */
  787. mvuart->data = (struct mvebu_uart_driver_data *)match->data;
  788. mvuart->port = port;
  789. port->private_data = mvuart;
  790. platform_set_drvdata(pdev, mvuart);
  791. /* Get fixed clock frequency */
  792. mvuart->clk = devm_clk_get(&pdev->dev, NULL);
  793. if (IS_ERR(mvuart->clk)) {
  794. if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER)
  795. return PTR_ERR(mvuart->clk);
  796. if (IS_EXTENDED(port)) {
  797. dev_err(&pdev->dev, "unable to get UART clock\n");
  798. return PTR_ERR(mvuart->clk);
  799. }
  800. } else {
  801. if (!clk_prepare_enable(mvuart->clk))
  802. port->uartclk = clk_get_rate(mvuart->clk);
  803. }
  804. /* Manage interrupts */
  805. if (platform_irq_count(pdev) == 1) {
  806. /* Old bindings: no name on the single unamed UART0 IRQ */
  807. irq = platform_get_irq(pdev, 0);
  808. if (irq < 0)
  809. return irq;
  810. mvuart->irq[UART_IRQ_SUM] = irq;
  811. } else {
  812. /*
  813. * New bindings: named interrupts (RX, TX) for both UARTS,
  814. * only make use of uart-rx and uart-tx interrupts, do not use
  815. * uart-sum of UART0 port.
  816. */
  817. irq = platform_get_irq_byname(pdev, "uart-rx");
  818. if (irq < 0)
  819. return irq;
  820. mvuart->irq[UART_RX_IRQ] = irq;
  821. irq = platform_get_irq_byname(pdev, "uart-tx");
  822. if (irq < 0)
  823. return irq;
  824. mvuart->irq[UART_TX_IRQ] = irq;
  825. }
  826. /* UART Soft Reset*/
  827. writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port));
  828. udelay(1);
  829. writel(0, port->membase + UART_CTRL(port));
  830. return uart_add_one_port(&mvebu_uart_driver, port);
  831. }
/* Register layout and status/control bits for the standard UART variant. */
static struct mvebu_uart_driver_data uart_std_driver_data = {
	.is_ext = false,
	.regs.rbr = UART_STD_RBR,
	.regs.tsh = UART_STD_TSH,
	.regs.ctrl = UART_STD_CTRL1,
	.regs.intr = UART_STD_CTRL2,
	.flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT,
	.flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT,
	.flags.stat_tx_rdy = STAT_STD_TX_RDY,
	.flags.stat_rx_rdy = STAT_STD_RX_RDY,
};
/* Register layout and status/control bits for the extended UART variant. */
static struct mvebu_uart_driver_data uart_ext_driver_data = {
	.is_ext = true,
	.regs.rbr = UART_EXT_RBR,
	.regs.tsh = UART_EXT_TSH,
	.regs.ctrl = UART_EXT_CTRL1,
	.regs.intr = UART_EXT_CTRL2,
	.flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT,
	.flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT,
	.flags.stat_tx_rdy = STAT_EXT_TX_RDY,
	.flags.stat_rx_rdy = STAT_EXT_RX_RDY,
};
/* Match table for of_platform binding */
static const struct of_device_id mvebu_uart_of_match[] = {
	{
		.compatible = "marvell,armada-3700-uart",
		.data = (void *)&uart_std_driver_data,
	},
	{
		.compatible = "marvell,armada-3700-uart-ext",
		.data = (void *)&uart_ext_driver_data,
	},
	{}
};
/* UART platform driver; no remove callback, so unbinding is suppressed. */
static struct platform_driver mvebu_uart_platform_driver = {
	.probe = mvebu_uart_probe,
	.driver = {
		.name  = "mvebu-uart",
		.of_match_table = of_match_ptr(mvebu_uart_of_match),
		.suppress_bind_attrs = true,
#if defined(CONFIG_PM)
		.pm	= &mvebu_uart_pm_ops,
#endif /* CONFIG_PM */
	},
};
/* This code is based on clk-fixed-factor.c driver and modified. */

/* One gateable UART clock output (uart_1 or uart_2). */
struct mvebu_uart_clock {
	struct clk_hw clk_hw;
	int clock_idx;		/* 0 = UART1 clock, 1 = UART2 clock */
	u32 pm_context_reg1;	/* reg1 value saved across system sleep */
	u32 pm_context_reg2;	/* reg2 value saved across system sleep */
};

/* Shared state for both UART clocks and their common control registers. */
struct mvebu_uart_clock_base {
	struct mvebu_uart_clock clocks[2];
	unsigned int parent_rates[5];	/* rate per candidate parent, 0 if absent */
	int parent_idx;			/* chosen index into parent_rates[] */
	unsigned int div;		/* combined d1 * d2 divisor for the TBG source */
	void __iomem *reg1;		/* UART Clock Control / UART1 BRDV */
	void __iomem *reg2;		/* UART2 BRDV */
	bool configured;		/* one-time setup in ->prepare done */
};

/* Index of the "xtal" entry among the candidate parent clocks. */
#define PARENT_CLOCK_XTAL 4

#define to_uart_clock(hw) container_of(hw, struct mvebu_uart_clock, clk_hw)

/* Recover the clock base from a member clock via its clock_idx slot. */
#define to_uart_clock_base(uart_clock) container_of(uart_clock, \
	struct mvebu_uart_clock_base, clocks[uart_clock->clock_idx])
static int mvebu_uart_clock_prepare(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
						to_uart_clock_base(uart_clock);
	unsigned int prev_clock_idx, prev_clock_rate, prev_d1d2;
	unsigned int parent_clock_idx, parent_clock_rate;
	unsigned long flags;
	unsigned int d1, d2;
	u64 divisor;
	u32 val;

	/*
	 * This function just reconfigures UART Clock Control register (located
	 * in UART1 address space which controls both UART1 and UART2) to
	 * selected UART base clock and recalculates current UART1/UART2
	 * divisors in their address spaces, so that final baudrate will not be
	 * changed by switching UART parent clock. This is required for
	 * otherwise kernel's boot log stops working - we need to ensure that
	 * UART baudrate does not change during this setup. It is a one time
	 * operation, it will execute only once and set `configured` to true,
	 * and be skipped on subsequent calls. Because this UART Clock Control
	 * register (UART_BRDV) is shared between UART1 baudrate function,
	 * UART1 clock selector and UART2 clock selector, every access to
	 * UART_BRDV (reg1) needs to be protected by a lock.
	 */
	spin_lock_irqsave(&mvebu_uart_lock, flags);

	/* One-time setup: nothing to do once configured */
	if (uart_clock_base->configured) {
		spin_unlock_irqrestore(&mvebu_uart_lock, flags);
		return 0;
	}

	parent_clock_idx = uart_clock_base->parent_idx;
	parent_clock_rate = uart_clock_base->parent_rates[parent_clock_idx];

	val = readl(uart_clock_base->reg1);

	/* Split the combined divisor into d1/d2, d1 limited to its field max */
	if (uart_clock_base->div > CLK_TBG_DIV1_MAX) {
		d1 = CLK_TBG_DIV1_MAX;
		d2 = uart_clock_base->div / CLK_TBG_DIV1_MAX;
	} else {
		d1 = uart_clock_base->div;
		d2 = 1;
	}

	/* Determine the currently selected source and its effective divisor */
	if (val & CLK_NO_XTAL) {
		prev_clock_idx = (val >> CLK_TBG_SEL_SHIFT) & CLK_TBG_SEL_MASK;
		prev_d1d2 = ((val >> CLK_TBG_DIV1_SHIFT) & CLK_TBG_DIV1_MASK) *
			    ((val >> CLK_TBG_DIV2_SHIFT) & CLK_TBG_DIV2_MASK);
	} else {
		prev_clock_idx = PARENT_CLOCK_XTAL;
		prev_d1d2 = 1;
	}

	/* Note that uart_clock_base->parent_rates[i] may not be available */
	prev_clock_rate = uart_clock_base->parent_rates[prev_clock_idx];

	/* Recalculate UART1 divisor so UART1 baudrate does not change */
	if (prev_clock_rate) {
		divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
						parent_clock_rate * prev_d1d2,
						prev_clock_rate * d1 * d2);
		/* Clamp the new divisor into the BRDV field range */
		if (divisor < 1)
			divisor = 1;
		else if (divisor > BRDV_BAUD_MAX)
			divisor = BRDV_BAUD_MAX;
		val = (val & ~BRDV_BAUD_MASK) | divisor;
	}

	if (parent_clock_idx != PARENT_CLOCK_XTAL) {
		/* Do not use XTAL, select TBG clock and TBG d1 * d2 divisors */
		val |= CLK_NO_XTAL;
		val &= ~(CLK_TBG_DIV1_MASK << CLK_TBG_DIV1_SHIFT);
		val |= d1 << CLK_TBG_DIV1_SHIFT;
		val &= ~(CLK_TBG_DIV2_MASK << CLK_TBG_DIV2_SHIFT);
		val |= d2 << CLK_TBG_DIV2_SHIFT;
		val &= ~(CLK_TBG_SEL_MASK << CLK_TBG_SEL_SHIFT);
		val |= parent_clock_idx << CLK_TBG_SEL_SHIFT;
	} else {
		/* Use XTAL, TBG bits are then ignored */
		val &= ~CLK_NO_XTAL;
	}

	writel(val, uart_clock_base->reg1);

	/* Recalculate UART2 divisor so UART2 baudrate does not change */
	if (prev_clock_rate) {
		val = readl(uart_clock_base->reg2);
		divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
						parent_clock_rate * prev_d1d2,
						prev_clock_rate * d1 * d2);
		/* Clamp the new divisor into the BRDV field range */
		if (divisor < 1)
			divisor = 1;
		else if (divisor > BRDV_BAUD_MAX)
			divisor = BRDV_BAUD_MAX;
		val = (val & ~BRDV_BAUD_MASK) | divisor;
		writel(val, uart_clock_base->reg2);
	}

	uart_clock_base->configured = true;

	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	return 0;
}
  989. static int mvebu_uart_clock_enable(struct clk_hw *hw)
  990. {
  991. struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
  992. struct mvebu_uart_clock_base *uart_clock_base =
  993. to_uart_clock_base(uart_clock);
  994. unsigned long flags;
  995. u32 val;
  996. spin_lock_irqsave(&mvebu_uart_lock, flags);
  997. val = readl(uart_clock_base->reg1);
  998. if (uart_clock->clock_idx == 0)
  999. val &= ~UART1_CLK_DIS;
  1000. else
  1001. val &= ~UART2_CLK_DIS;
  1002. writel(val, uart_clock_base->reg1);
  1003. spin_unlock_irqrestore(&mvebu_uart_lock, flags);
  1004. return 0;
  1005. }
  1006. static void mvebu_uart_clock_disable(struct clk_hw *hw)
  1007. {
  1008. struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
  1009. struct mvebu_uart_clock_base *uart_clock_base =
  1010. to_uart_clock_base(uart_clock);
  1011. unsigned long flags;
  1012. u32 val;
  1013. spin_lock_irqsave(&mvebu_uart_lock, flags);
  1014. val = readl(uart_clock_base->reg1);
  1015. if (uart_clock->clock_idx == 0)
  1016. val |= UART1_CLK_DIS;
  1017. else
  1018. val |= UART2_CLK_DIS;
  1019. writel(val, uart_clock_base->reg1);
  1020. spin_unlock_irqrestore(&mvebu_uart_lock, flags);
  1021. }
  1022. static int mvebu_uart_clock_is_enabled(struct clk_hw *hw)
  1023. {
  1024. struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
  1025. struct mvebu_uart_clock_base *uart_clock_base =
  1026. to_uart_clock_base(uart_clock);
  1027. u32 val;
  1028. val = readl(uart_clock_base->reg1);
  1029. if (uart_clock->clock_idx == 0)
  1030. return !(val & UART1_CLK_DIS);
  1031. else
  1032. return !(val & UART2_CLK_DIS);
  1033. }
  1034. static int mvebu_uart_clock_save_context(struct clk_hw *hw)
  1035. {
  1036. struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
  1037. struct mvebu_uart_clock_base *uart_clock_base =
  1038. to_uart_clock_base(uart_clock);
  1039. unsigned long flags;
  1040. spin_lock_irqsave(&mvebu_uart_lock, flags);
  1041. uart_clock->pm_context_reg1 = readl(uart_clock_base->reg1);
  1042. uart_clock->pm_context_reg2 = readl(uart_clock_base->reg2);
  1043. spin_unlock_irqrestore(&mvebu_uart_lock, flags);
  1044. return 0;
  1045. }
  1046. static void mvebu_uart_clock_restore_context(struct clk_hw *hw)
  1047. {
  1048. struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
  1049. struct mvebu_uart_clock_base *uart_clock_base =
  1050. to_uart_clock_base(uart_clock);
  1051. unsigned long flags;
  1052. spin_lock_irqsave(&mvebu_uart_lock, flags);
  1053. writel(uart_clock->pm_context_reg1, uart_clock_base->reg1);
  1054. writel(uart_clock->pm_context_reg2, uart_clock_base->reg2);
  1055. spin_unlock_irqrestore(&mvebu_uart_lock, flags);
  1056. }
  1057. static unsigned long mvebu_uart_clock_recalc_rate(struct clk_hw *hw,
  1058. unsigned long parent_rate)
  1059. {
  1060. struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
  1061. struct mvebu_uart_clock_base *uart_clock_base =
  1062. to_uart_clock_base(uart_clock);
  1063. return parent_rate / uart_clock_base->div;
  1064. }
  1065. static long mvebu_uart_clock_round_rate(struct clk_hw *hw, unsigned long rate,
  1066. unsigned long *parent_rate)
  1067. {
  1068. struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
  1069. struct mvebu_uart_clock_base *uart_clock_base =
  1070. to_uart_clock_base(uart_clock);
  1071. return *parent_rate / uart_clock_base->div;
  1072. }
static int mvebu_uart_clock_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	/*
	 * We must report success but we can do so unconditionally because
	 * mvebu_uart_clock_round_rate returns values that ensure this call is a
	 * nop.
	 */
	return 0;
}
/* clk_ops for the two gateable, fixed-divisor UART clocks. */
static const struct clk_ops mvebu_uart_clock_ops = {
	.prepare = mvebu_uart_clock_prepare,
	.enable = mvebu_uart_clock_enable,
	.disable = mvebu_uart_clock_disable,
	.is_enabled = mvebu_uart_clock_is_enabled,
	.save_context = mvebu_uart_clock_save_context,
	.restore_context = mvebu_uart_clock_restore_context,
	.round_rate = mvebu_uart_clock_round_rate,
	.set_rate = mvebu_uart_clock_set_rate,
	.recalc_rate = mvebu_uart_clock_recalc_rate,
};
  1094. static int mvebu_uart_clock_register(struct device *dev,
  1095. struct mvebu_uart_clock *uart_clock,
  1096. const char *name,
  1097. const char *parent_name)
  1098. {
  1099. struct clk_init_data init = { };
  1100. uart_clock->clk_hw.init = &init;
  1101. init.name = name;
  1102. init.ops = &mvebu_uart_clock_ops;
  1103. init.flags = 0;
  1104. init.num_parents = 1;
  1105. init.parent_names = &parent_name;
  1106. return devm_clk_hw_register(dev, &uart_clock->clk_hw);
  1107. }
/*
 * Probe the UART clock provider: map the two shared BRDV registers, probe
 * every candidate parent clock, pick the best usable parent (smallest TBG
 * divisors, XTAL only as last resort), then register both UART clocks and
 * expose them through a onecell clk provider.
 */
static int mvebu_uart_clock_probe(struct platform_device *pdev)
{
	static const char *const uart_clk_names[] = { "uart_1", "uart_2" };
	static const char *const parent_clk_names[] = { "TBG-A-P", "TBG-B-P",
							"TBG-A-S", "TBG-B-S",
							"xtal" };
	struct clk *parent_clks[ARRAY_SIZE(parent_clk_names)];
	struct mvebu_uart_clock_base *uart_clock_base;
	struct clk_hw_onecell_data *hw_clk_data;
	struct device *dev = &pdev->dev;
	int i, parent_clk_idx, ret;
	unsigned long div, rate;
	struct resource *res;
	unsigned int d1, d2;

	/* The name tables must stay in sync with the clock-base arrays */
	BUILD_BUG_ON(ARRAY_SIZE(uart_clk_names) !=
		     ARRAY_SIZE(uart_clock_base->clocks));
	BUILD_BUG_ON(ARRAY_SIZE(parent_clk_names) !=
		     ARRAY_SIZE(uart_clock_base->parent_rates));

	uart_clock_base = devm_kzalloc(dev,
				       sizeof(*uart_clock_base),
				       GFP_KERNEL);
	if (!uart_clock_base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Couldn't get first register\n");
		return -ENOENT;
	}

	/*
	 * UART Clock Control register (reg1 / UART_BRDV) is in the address
	 * space of UART1 (standard UART variant), controls parent clock and
	 * dividers for both UART1 and UART2 and is supplied via DT as the first
	 * resource. Therefore use ioremap() rather than ioremap_resource() to
	 * avoid conflicts with UART1 driver. Access to UART_BRDV is protected
	 * by a lock shared between clock and UART driver.
	 */
	uart_clock_base->reg1 = devm_ioremap(dev, res->start,
					     resource_size(res));
	if (!uart_clock_base->reg1)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Couldn't get second register\n");
		return -ENOENT;
	}

	/*
	 * UART 2 Baud Rate Divisor register (reg2 / UART_BRDV) is in address
	 * space of UART2 (extended UART variant), controls only one UART2
	 * specific divider and is supplied via DT as second resource.
	 * Therefore use ioremap() rather than ioremap_resource() to avoid
	 * conflicts with UART2 driver. Access to UART_BRDV is protected by a
	 * by lock shared between clock and UART driver.
	 */
	uart_clock_base->reg2 = devm_ioremap(dev, res->start,
					     resource_size(res));
	if (!uart_clock_base->reg2)
		return -ENOMEM;

	hw_clk_data = devm_kzalloc(dev,
				   struct_size(hw_clk_data, hws,
					       ARRAY_SIZE(uart_clk_names)),
				   GFP_KERNEL);
	if (!hw_clk_data)
		return -ENOMEM;

	hw_clk_data->num = ARRAY_SIZE(uart_clk_names);
	for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
		hw_clk_data->hws[i] = &uart_clock_base->clocks[i].clk_hw;
		uart_clock_base->clocks[i].clock_idx = i;
	}

	/* -1 means "no usable parent found yet" */
	parent_clk_idx = -1;

	for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
		parent_clks[i] = devm_clk_get(dev, parent_clk_names[i]);
		if (IS_ERR(parent_clks[i])) {
			if (PTR_ERR(parent_clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			/* A missing candidate parent is not fatal */
			dev_warn(dev, "Couldn't get the parent clock %s: %ld\n",
				 parent_clk_names[i], PTR_ERR(parent_clks[i]));
			continue;
		}

		ret = clk_prepare_enable(parent_clks[i]);
		if (ret) {
			dev_warn(dev, "Couldn't enable parent clock %s: %d\n",
				 parent_clk_names[i], ret);
			continue;
		}
		rate = clk_get_rate(parent_clks[i]);
		uart_clock_base->parent_rates[i] = rate;

		if (i != PARENT_CLOCK_XTAL) {
			/*
			 * Calculate the smallest TBG d1 and d2 divisors that
			 * still can provide 9600 baudrate.
			 */
			d1 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
					  BRDV_BAUD_MAX);
			if (d1 < 1)
				d1 = 1;
			else if (d1 > CLK_TBG_DIV1_MAX)
				d1 = CLK_TBG_DIV1_MAX;

			d2 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
					  BRDV_BAUD_MAX * d1);
			if (d2 < 1)
				d2 = 1;
			else if (d2 > CLK_TBG_DIV2_MAX)
				d2 = CLK_TBG_DIV2_MAX;
		} else {
			/*
			 * When UART clock uses XTAL clock as a source then it
			 * is not possible to use d1 and d2 divisors.
			 */
			d1 = d2 = 1;
		}

		/* Skip clock source which cannot provide 9600 baudrate */
		if (rate > 9600 * OSAMP_MAX_DIVISOR * BRDV_BAUD_MAX * d1 * d2)
			continue;

		/*
		 * Choose TBG clock source with the smallest divisors. Use XTAL
		 * clock source only in case TBG is not available as XTAL cannot
		 * be used for baudrates higher than 230400.
		 */
		if (parent_clk_idx == -1 ||
		    (i != PARENT_CLOCK_XTAL && div > d1 * d2)) {
			parent_clk_idx = i;
			div = d1 * d2;
		}
	}

	/* Release every candidate parent except the one we selected */
	for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
		if (i == parent_clk_idx || IS_ERR(parent_clks[i]))
			continue;
		clk_disable_unprepare(parent_clks[i]);
		devm_clk_put(dev, parent_clks[i]);
	}

	if (parent_clk_idx == -1) {
		dev_err(dev, "No usable parent clock\n");
		return -ENOENT;
	}

	uart_clock_base->parent_idx = parent_clk_idx;
	uart_clock_base->div = div;

	dev_notice(dev, "Using parent clock %s as base UART clock\n",
		   __clk_get_name(parent_clks[parent_clk_idx]));

	for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
		ret = mvebu_uart_clock_register(dev,
				&uart_clock_base->clocks[i],
				uart_clk_names[i],
				__clk_get_name(parent_clks[parent_clk_idx]));
		if (ret) {
			dev_err(dev, "Can't register UART clock %d: %d\n",
				i, ret);
			return ret;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   hw_clk_data);
}
/* Match table for the UART clock provider node. */
static const struct of_device_id mvebu_uart_clock_of_match[] = {
	{ .compatible = "marvell,armada-3700-uart-clock", },
	{ }
};
/* Platform driver for the UART clock provider. */
static struct platform_driver mvebu_uart_clock_platform_driver = {
	.probe = mvebu_uart_clock_probe,
	.driver = {
		.name = "mvebu-uart-clock",
		.of_match_table = mvebu_uart_clock_of_match,
	},
};
  1271. static int __init mvebu_uart_init(void)
  1272. {
  1273. int ret;
  1274. ret = uart_register_driver(&mvebu_uart_driver);
  1275. if (ret)
  1276. return ret;
  1277. ret = platform_driver_register(&mvebu_uart_clock_platform_driver);
  1278. if (ret) {
  1279. uart_unregister_driver(&mvebu_uart_driver);
  1280. return ret;
  1281. }
  1282. ret = platform_driver_register(&mvebu_uart_platform_driver);
  1283. if (ret) {
  1284. platform_driver_unregister(&mvebu_uart_clock_platform_driver);
  1285. uart_unregister_driver(&mvebu_uart_driver);
  1286. return ret;
  1287. }
  1288. return 0;
  1289. }
  1290. arch_initcall(mvebu_uart_init);