timer-atmel-tcb.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
#include <soc/at91/atmel_tcb.h>

/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a timing solution:
 *
 *  - Two channels combine to create a free-running 32-bit counter
 *    with a base rate of 5+ MHz, packaged as a clocksource (with
 *    resolution better than 200 nsec).
 *
 *  - Some chips support a 32-bit counter. A single channel is used for
 *    this 32-bit free-running counter; the second channel is not used.
 *
 *  - The third channel may be used to provide a clockevent source, used in
 *    either periodic or oneshot mode. With a 16-bit counter it runs at
 *    32 KiHz and can handle delays of up to two seconds; with a 32-bit
 *    counter it runs at the same rate as the clocksource.
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power. Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */

static void __iomem *tcaddr;
static struct
{
	u32 cmr;
	u32 imr;
	u32 rc;
	bool clken;
} tcb_cache[3];

static u32 bmr_cache;

static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };

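/*
 * On 16-bit TC blocks the clocksource is built from two chained channels:
 * channel 0 holds the low 16 bits and channel 1 the high 16 bits. Read the
 * upper half, then the lower half, then re-read the upper half to detect a
 * rollover that happened between the two reads.
 */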
static u64 tc_get_cycles(struct clocksource *cs)
{
	unsigned long flags;
	u32 lower, upper;

	raw_local_irq_save(flags);
	do {
		upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
		lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

static u64 tc_get_cycles32(struct clocksource *cs)
{
	return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}

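/*
 * Suspend/resume: save and restore each channel's mode (CMR), interrupt
 * mask (IMR), RC compare value and clock-enable state, plus the block
 * mode register (BMR), so the counters can be restarted after a sleep.
 */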
static void tc_clksrc_suspend(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
		tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
		tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
		tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
					ATMEL_TC_CLKSTA);
	}

	bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}

static void tc_clksrc_resume(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		/* Restore registers for the channel, RA and RB are not used */
		writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
		writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
		writel(0, tcaddr + ATMEL_TC_REG(i, RA));
		writel(0, tcaddr + ATMEL_TC_REG(i, RB));
		/* Disable all the interrupts */
		writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
		/* Reenable interrupts that were enabled before suspending */
		writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
		/* Start the clock if it was used */
		if (tcb_cache[i].clken)
			writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
	}

	/* Dual channel, chain channels */
	writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
	/* Finally, trigger all the channels */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct clocksource clksrc = {
	.rating		= 200,
	.read		= tc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend	= tc_clksrc_suspend,
	.resume		= tc_clksrc_resume,
};

static u64 notrace tc_sched_clock_read(void)
{
	return tc_get_cycles(&clksrc);
}

static u64 notrace tc_sched_clock_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

static struct delay_timer tc_delay_timer;

static unsigned long tc_delay_timer_read(void)
{
	return tc_get_cycles(&clksrc);
}

static unsigned long notrace tc_delay_timer_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

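/*
 * Channel 2 provides the clockevent. On 16-bit TC blocks it is clocked by
 * the 32 KiHz slow clock (ATMEL_TC_TIMER_CLOCK5); on 32-bit blocks it runs
 * at the same divided rate as the clocksource.
 */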
#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
	struct clock_event_device	clkevt;
	struct clk			*clk;
	u32				rate;
	void __iomem			*regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

static u32 timer_clock;

static int tc_shutdown(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	writel(0xff, regs + ATMEL_TC_REG(2, IDR));
	writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
	if (!clockevent_state_detached(d))
		clk_disable(tcd->clk);

	return 0;
}

static int tc_set_oneshot(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	clk_enable(tcd->clk);

	/* count up to RC, then irq and stop */
	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
	       ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* set_next_event() configures and starts the timer */
	return 0;
}

static int tc_set_periodic(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	/* By not making the gentime core emulate periodic mode on top
	 * of oneshot, we get lower overhead and improved accuracy.
	 */
	clk_enable(tcd->clk);

	/* count up to RC, then irq and restart */
	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
	       regs + ATMEL_TC_REG(2, CMR));
	writel((tcd->rate + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

	/* Enable clock and interrupts on RC compare */
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* go go gadget! */
	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
	       ATMEL_TC_REG(2, CCR));
	return 0;
}

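/*
 * Arming a oneshot event: tc_set_oneshot() already selected CPCSTOP waveform
 * mode, so programming the next event only needs a new RC compare value and
 * a software trigger to restart the channel from zero.
 */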
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));

	/* go go gadget! */
	writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
		       tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}

static struct tc_clkevt_device clkevt = {
	.clkevt	= {
		.features		= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT,
		/* Should be lower than at91rm9200's system timer */
		.rating			= 125,
		.set_next_event		= tc_next_event,
		.set_state_shutdown	= tc_shutdown,
		.set_state_periodic	= tc_set_periodic,
		.set_state_oneshot	= tc_set_oneshot,
	},
};

static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device *dev = handle;
	unsigned int sr;

	sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
	int ret;
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];
	int bits = tc->tcb_config->counter_width;

	/* try to enable t2 clk to avoid future errors in mode change */
	ret = clk_prepare_enable(t2_clk);
	if (ret)
		return ret;

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;

	if (bits == 32) {
		timer_clock = divisor_idx;
		clkevt.rate = clk_get_rate(t2_clk) / atmel_tcb_divisors[divisor_idx];
	} else {
		ret = clk_prepare_enable(tc->slow_clk);
		if (ret) {
			clk_disable_unprepare(t2_clk);
			return ret;
		}

		clkevt.rate = clk_get_rate(tc->slow_clk);
		timer_clock = ATMEL_TC_TIMER_CLOCK5;
	}

	clk_disable(t2_clk);

	clkevt.clkevt.cpumask = cpumask_of(0);

	ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
	if (ret) {
		clk_unprepare(t2_clk);
		if (bits != 32)
			clk_disable_unprepare(tc->slow_clk);
		return ret;
	}

	clockevents_config_and_register(&clkevt.clkevt, clkevt.rate, 1, BIT(bits) - 1);

	return ret;
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
	/* NOTHING */
	return 0;
}

#endif

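/*
 * 16-bit chaining: channel 0 runs in waveform mode and toggles TIOA0 with a
 * 50% duty cycle (set at RA = 0, clear at RC = 0x8000). TIOA0 is routed to
 * XC1 via the block mode register, so channel 1 advances once per channel-0
 * wrap and the pair behaves as one free-running 32-bit counter.
 */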
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
	writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP	/* free-run */
			| ATMEL_TC_ASWTRG_SET	/* TIOA0 rises at software trigger */
			| ATMEL_TC_ACPA_SET	/* TIOA0 rises at 0 */
			| ATMEL_TC_ACPC_CLEAR,	/* (duty cycle 50%) */
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1: waveform mode, input TIOA0 */
	writel(ATMEL_TC_XC1			/* input: TIOA0 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,	/* free-run */
			tcaddr + ATMEL_TC_REG(1, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

	/* chain channel 0 to channel 1 */
	writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);

	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, input mclk/8 */
	writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,	/* free-run */
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct atmel_tcb_config tcb_rm9200_config = {
	.counter_width = 16,
};

static struct atmel_tcb_config tcb_sam9x5_config = {
	.counter_width = 32,
};

static struct atmel_tcb_config tcb_sama5d2_config = {
	.counter_width = 32,
	.has_gclk = 1,
};

static const struct of_device_id atmel_tcb_of_match[] = {
	{ .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
	{ .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
	{ .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
	{ /* sentinel */ }
};

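/*
 * Probe path: map the TC block, obtain the per-channel and slow clocks plus
 * the channel 2 interrupt, choose a master clock divisor that keeps the
 * counting rate above 5 MHz where possible, then register the clocksource,
 * sched_clock, delay timer and (on channel 2) the clockevent.
 */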
static int __init tcb_clksrc_init(struct device_node *node)
{
	struct atmel_tc tc;
	struct clk *t0_clk;
	const struct of_device_id *match;
	u64 (*tc_sched_clock)(void);
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int bits;
	int i;
	int ret;

	/* Protect against multiple calls */
	if (tcaddr)
		return 0;

	tc.regs = of_iomap(node->parent, 0);
	if (!tc.regs)
		return -ENXIO;

	t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
	if (IS_ERR(t0_clk))
		return PTR_ERR(t0_clk);

	tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
	if (IS_ERR(tc.slow_clk))
		return PTR_ERR(tc.slow_clk);

	tc.clk[0] = t0_clk;
	tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
	if (IS_ERR(tc.clk[1]))
		tc.clk[1] = t0_clk;
	tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
	if (IS_ERR(tc.clk[2]))
		tc.clk[2] = t0_clk;

	tc.irq[2] = of_irq_get(node->parent, 2);
	if (tc.irq[2] <= 0) {
		tc.irq[2] = of_irq_get(node->parent, 0);
		if (tc.irq[2] <= 0)
			return -EINVAL;
	}

	match = of_match_node(atmel_tcb_of_match, node->parent);
	if (!match)
		return -ENODEV;

	tc.tcb_config = match->data;
	bits = tc.tcb_config->counter_width;

	for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
		writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));

	ret = clk_prepare_enable(t0_clk);
	if (ret) {
		pr_debug("can't enable T0 clk\n");
		return ret;
	}

	/* How fast will we be counting? Pick something over 5 MHz. */
	rate = (u32)clk_get_rate(t0_clk);
	i = 0;
	if (tc.tcb_config->has_gclk)
		i = 1;
	for (; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
		unsigned divisor = atmel_tcb_divisors[i];
		unsigned tmp;

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if ((best_divisor_idx >= 0) && (tmp < 5 * 1000 * 1000))
			break;
		divided_rate = tmp;
		best_divisor_idx = i;
	}

	clksrc.name = kbasename(node->parent->full_name);
	clkevt.clkevt.name = kbasename(node->parent->full_name);
	pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
		 ((divided_rate % 1000000) + 500) / 1000);

	tcaddr = tc.regs;

	if (bits == 32) {
		/* use appropriate function to read 32 bit counter */
		clksrc.read = tc_get_cycles32;
		/* setup only channel 0 */
		tcb_setup_single_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read32;
		tc_delay_timer.read_current_timer = tc_delay_timer_read32;
	} else {
		/* we have three clocks no matter what the
		 * underlying platform supports.
		 */
		ret = clk_prepare_enable(tc.clk[1]);
		if (ret) {
			pr_debug("can't enable T1 clk\n");
			goto err_disable_t0;
		}
		/* setup both channel 0 & 1 */
		tcb_setup_dual_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read;
		tc_delay_timer.read_current_timer = tc_delay_timer_read;
	}

	/* and away we go! */
	ret = clocksource_register_hz(&clksrc, divided_rate);
	if (ret)
		goto err_disable_t1;

	/* channel 2: periodic and oneshot timer support */
	ret = setup_clkevents(&tc, best_divisor_idx);
	if (ret)
		goto err_unregister_clksrc;

	sched_clock_register(tc_sched_clock, 32, divided_rate);

	tc_delay_timer.freq = divided_rate;
	register_current_timer_delay(&tc_delay_timer);

	return 0;

err_unregister_clksrc:
	clocksource_unregister(&clksrc);

err_disable_t1:
	if (bits != 32)
		clk_disable_unprepare(tc.clk[1]);

err_disable_t0:
	clk_disable_unprepare(t0_clk);

	tcaddr = NULL;

	return ret;
}

TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);