arc_timer.c 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
  4. * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  5. */
  6. /* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1, Each can be
  7. * programmed to go from @count to @limit and optionally interrupt.
  8. * We've designated TIMER0 for clockevents and TIMER1 for clocksource
  9. *
  10. * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP)
  11. * which are suitable for UP and SMP based clocksources respectively
  12. */
  13. #include <linux/interrupt.h>
  14. #include <linux/bits.h>
  15. #include <linux/clk.h>
  16. #include <linux/clk-provider.h>
  17. #include <linux/clocksource.h>
  18. #include <linux/clockchips.h>
  19. #include <linux/cpu.h>
  20. #include <linux/of.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/sched_clock.h>
  23. #include <soc/arc/timers.h>
  24. #include <soc/arc/mcip.h>
  25. static unsigned long arc_timer_freq;
  26. static int noinline arc_get_timer_clk(struct device_node *node)
  27. {
  28. struct clk *clk;
  29. int ret;
  30. clk = of_clk_get(node, 0);
  31. if (IS_ERR(clk)) {
  32. pr_err("timer missing clk\n");
  33. return PTR_ERR(clk);
  34. }
  35. ret = clk_prepare_enable(clk);
  36. if (ret) {
  37. pr_err("Couldn't enable parent clk\n");
  38. return ret;
  39. }
  40. arc_timer_freq = clk_get_rate(clk);
  41. return 0;
  42. }
  43. /********** Clock Source Device *********/
  44. #ifdef CONFIG_ARC_TIMERS_64BIT
/*
 * Read the 64-bit Global Free Running Counter (GFRC) living in
 * ARConnect/MCIP, as two 32-bit halves via the MCIP command interface.
 * clocksource ->read() hook; @cs is unused.
 */
static u64 arc_read_gfrc(struct clocksource *cs)
{
	unsigned long flags;
	u32 l, h;

	/*
	 * From a programming model pov, there seems to be just one instance of
	 * MCIP_CMD/MCIP_READBACK however micro-architecturally there's
	 * an instance PER ARC CORE (not per cluster), and there are dedicated
	 * hardware decode logic (per core) inside ARConnect to handle
	 * simultaneous read/write accesses from cores via those two registers.
	 * So several concurrent commands to ARConnect are OK if they are
	 * trying to access two different sub-components (like GFRC,
	 * inter-core interrupt, etc...). HW also supports simultaneously
	 * accessing GFRC by multiple cores.
	 * That's why it is safe to disable hard interrupts on the local CPU
	 * before access to GFRC instead of taking global MCIP spinlock
	 * defined in arch/arc/kernel/mcip.c
	 */
	local_irq_save(flags);

	/* LO must be latched before HI; irqs-off keeps the pair atomic locally */
	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	l = read_aux_reg(ARC_REG_MCIP_READBACK);
	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return (((u64)h) << 32) | l;
}
/*
 * sched_clock read hook: same GFRC readout, minus the clocksource arg.
 * notrace: called from scheduler/tracing guts, must not itself be traced.
 */
static notrace u64 arc_gfrc_clock_read(void)
{
	return arc_read_gfrc(NULL);
}
/* GFRC clocksource: SMP-safe (cluster-global counter), hence top rating */
static struct clocksource arc_counter_gfrc = {
	.name	= "ARConnect GFRC",
	.rating	= 400,
	.read	= arc_read_gfrc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
  82. static int __init arc_cs_setup_gfrc(struct device_node *node)
  83. {
  84. struct mcip_bcr mp;
  85. int ret;
  86. READ_BCR(ARC_REG_MCIP_BCR, mp);
  87. if (!mp.gfrc) {
  88. pr_warn("Global-64-bit-Ctr clocksource not detected\n");
  89. return -ENXIO;
  90. }
  91. ret = arc_get_timer_clk(node);
  92. if (ret)
  93. return ret;
  94. sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
  95. return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
  96. }
  97. TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
/* ARCv2 in-core RTC aux register numbers */
#define AUX_RTC_CTRL	0x103
#define AUX_RTC_LOW	0x104
#define AUX_RTC_HIGH	0x105

/*
 * Read the 64-bit in-core RTC, retrying until the hardware confirms the
 * LOW/HIGH pair was sampled atomically. clocksource ->read() hook; @cs unused.
 */
static u64 arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	u32 l, h;

	/*
	 * hardware has an internal state machine which tracks readout of
	 * low/high and updates the CTRL.status if
	 *  - interrupt/exception taken between the two reads
	 *  - high increments after low has been read
	 */
	do {
		l = read_aux_reg(AUX_RTC_LOW);
		h = read_aux_reg(AUX_RTC_HIGH);
		/* CTRL bit 31 set => the pair above is a consistent snapshot */
		status = read_aux_reg(AUX_RTC_CTRL);
	} while (!(status & BIT(31)));

	return (((u64)h) << 32) | l;
}
/* sched_clock read hook for the RTC; notrace as it runs in tracing guts */
static notrace u64 arc_rtc_clock_read(void)
{
	return arc_read_rtc(NULL);
}
/* In-core RTC clocksource: per-CPU counter, so UP-only; rated below GFRC */
static struct clocksource arc_counter_rtc = {
	.name	= "ARCv2 RTC",
	.rating	= 350,
	.read	= arc_read_rtc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
 * Probe and register the ARCv2 in-core RTC as sched_clock + clocksource.
 * Bails out on SMP kernels since the counter is private to each core and
 * cores are not guaranteed to be in sync.
 */
static int __init arc_cs_setup_rtc(struct device_node *node)
{
	struct bcr_timer timer;
	int ret;

	/* Build Config Register advertises whether the core has an RTC */
	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	if (!timer.rtc) {
		pr_warn("Local-64-bit-Ctr clocksource not detected\n");
		return -ENXIO;
	}

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP)) {
		pr_warn("Local-64-bit-Ctr not usable in SMP\n");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	/* Kick-start the counter (write 1 to CTRL) before registering it */
	write_aux_reg(AUX_RTC_CTRL, 1);

	sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
  151. #endif
/*
 * 32bit TIMER1 to keep counting monotonically and wraparound
 */

/* clocksource ->read() hook: raw TIMER1 count widened to u64; @cs unused */
static u64 arc_read_timer1(struct clocksource *cs)
{
	return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
}
/* sched_clock read hook for TIMER1; notrace as it runs in tracing guts */
static notrace u64 arc_timer1_clock_read(void)
{
	return arc_read_timer1(NULL);
}
/* 32-bit TIMER1 clocksource: the fallback when no 64-bit counter exists */
static struct clocksource arc_counter_timer1 = {
	.name	= "ARC Timer1",
	.rating	= 300,
	.read	= arc_read_timer1,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
 * Program TIMER1 as a free-running 0..ARC_TIMERN_MAX counter and register
 * it as sched_clock + clocksource. Per-CPU timer, so UP-only.
 */
static int __init arc_cs_setup_timer1(struct device_node *node)
{
	int ret;

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP))
		return -EINVAL;

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	/* free-run over the full 32-bit range, no interrupt, count from 0 */
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	/* NH: count only when Not Halted; CTRL write last starts the timer */
	write_aux_reg(ARC_REG_TIMER1_CTRL, ARC_TIMER_CTRL_NH);

	sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}
/********** Clock Event Device *********/

/* Linux IRQ number of TIMER0, shared by all per-cpu clockevent instances */
static int arc_timer_irq;

/*
 * Arm the timer to interrupt after @cycles
 * The distinction for oneshot/periodic is done in timer_irq_handler() below
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */

	/* IE: raise irq at LIMIT; NH: count only when core Not Halted */
	write_aux_reg(ARC_REG_TIMER0_CTRL, ARC_TIMER_CTRL_IE | ARC_TIMER_CTRL_NH);
}
/* clockevent ->set_next_event(): one-shot interrupt @delta cycles from now */
static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}
  203. static int arc_clkevent_set_periodic(struct clock_event_device *dev)
  204. {
  205. /*
  206. * At X Hz, 1 sec = 1000ms -> X cycles;
  207. * 10ms -> X / 100 cycles
  208. */
  209. arc_timer_event_setup(arc_timer_freq / HZ);
  210. return 0;
  211. }
/*
 * Per-CPU clockevent built on TIMER0 (each core has its own TIMER0).
 * No ->set_state_oneshot: oneshot mode needs nothing beyond set_next_event.
 */
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name			= "ARC Timer0",
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 300,
	.set_next_event		= arc_clkevent_set_next_event,
	.set_state_periodic	= arc_clkevent_set_periodic,
};
/*
 * TIMER0 interrupt: ACK the hardware, re-arm if in periodic mode, and run
 * the clockevent handler for this CPU's device.
 */
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	/* IE bit re-set only for periodic mode, so oneshot stays one shot */
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * 1. ACK the interrupt
	 *    - For ARC700, any write to CTRL reg ACKs it, so just rewrite
	 *      Count when [N]ot [H]alted bit.
	 *    - For HS3x, it is a bit subtle. On taken count-down interrupt,
	 *      IP bit [3] is set, which needs to be cleared for ACK'ing.
	 *      The write below can only update the other two bits, hence
	 *      explicitly clears IP bit
	 * 2. Re-arm interrupt if periodic by writing to IE bit [0]
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | ARC_TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
  242. static int arc_timer_starting_cpu(unsigned int cpu)
  243. {
  244. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  245. evt->cpumask = cpumask_of(smp_processor_id());
  246. clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
  247. enable_percpu_irq(arc_timer_irq, 0);
  248. return 0;
  249. }
/* CPU hotplug "dying" callback: mask this core's timer irq on the way down */
static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}
  255. /*
  256. * clockevent setup for boot CPU
  257. */
  258. static int __init arc_clockevent_setup(struct device_node *node)
  259. {
  260. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  261. int ret;
  262. arc_timer_irq = irq_of_parse_and_map(node, 0);
  263. if (arc_timer_irq <= 0) {
  264. pr_err("clockevent: missing irq\n");
  265. return -EINVAL;
  266. }
  267. ret = arc_get_timer_clk(node);
  268. if (ret)
  269. return ret;
  270. /* Needs apriori irq_set_percpu_devid() done in intc map function */
  271. ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
  272. "Timer0 (per-cpu-tick)", evt);
  273. if (ret) {
  274. pr_err("clockevent: unable to request irq\n");
  275. return ret;
  276. }
  277. ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
  278. "clockevents/arc/timer:starting",
  279. arc_timer_starting_cpu,
  280. arc_timer_dying_cpu);
  281. if (ret) {
  282. pr_err("Failed to setup hotplug state\n");
  283. return ret;
  284. }
  285. return 0;
  286. }
  287. static int __init arc_of_timer_init(struct device_node *np)
  288. {
  289. static int init_count = 0;
  290. int ret;
  291. if (!init_count) {
  292. init_count = 1;
  293. ret = arc_clockevent_setup(np);
  294. } else {
  295. ret = arc_cs_setup_timer1(np);
  296. }
  297. return ret;
  298. }
  299. TIMER_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);