exynos_mct.c

// SPDX-License-Identifier: GPL-2.0-only
/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Exynos4 MCT (Multi-Core Timer) support
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * (x)))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)

#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)

#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

#define TICK_BASE_CNT	1
#ifdef CONFIG_ARM
/* Use values higher than ARM arch timer. See 6282edb72bed. */
#define MCT_CLKSOURCE_RATING		450
#define MCT_CLKEVENTS_RATING		500
#else
#define MCT_CLKSOURCE_RATING		350
#define MCT_CLKEVENTS_RATING		350
#endif

/* There are four Global timers starting with 0 offset */
#define MCT_G0_IRQ	0
/* Local timers count starts after global timer count */
#define MCT_L0_IRQ	4
/* Max number of IRQ as per DT binding document */
#define MCT_NR_IRQS	20
/* Max number of local timers */
#define MCT_NR_LOCAL	(MCT_NR_IRQS - MCT_L0_IRQ)
enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
	struct clock_event_device evt;
	unsigned long base;
	/*
	 * The length of the name must be adjusted if the number of
	 * local timer interrupts grows beyond two digits.
	 */
	char name[11];
};
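
/*
 * MCT control-register writes are acknowledged by the hardware through
 * per-register write status (WSTAT) bits. exynos4_mct_write() below issues
 * the write, polls for the matching status bit and then clears it by writing
 * it back. The loop bound of loops_per_jiffy / 1000 * HZ is only a rough,
 * calibration-derived approximation of one millisecond of busy-waiting; if
 * the bit never shows up, the timer is considered wedged and the kernel
 * panics.
 */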
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	writel_relaxed(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (readl_relaxed(reg_base + stat_addr) & mask) {
			writel_relaxed(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}
/* Clocksource handling */
static void exynos4_mct_frc_start(void)
{
	u32 reg;

	reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

/**
 * exynos4_read_count_64 - Read all 64-bits of the global counter
 *
 * This will read all 64-bits of the global counter taking care to make sure
 * that the upper and lower half match. Note that reading the MCT can be quite
 * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
 * only) version when possible.
 *
 * Returns the number of cycles in the global counter.
 */
static u64 exynos4_read_count_64(void)
{
	unsigned int lo, hi;
	u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}
/**
 * exynos4_read_count_32 - Read the lower 32-bits of the global counter
 *
 * This will read just the lower 32-bits of the global counter. This is marked
 * as notrace so it can be used by the scheduler clock.
 *
 * Returns the number of cycles in the global counter (lower 32 bits).
 */
static u32 notrace exynos4_read_count_32(void)
{
	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}

static u64 exynos4_frc_read(struct clocksource *cs)
{
	return exynos4_read_count_32();
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start();
}

static struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= MCT_CLKSOURCE_RATING,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};
static u64 notrace exynos4_read_sched_clock(void)
{
	return exynos4_read_count_32();
}

#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;

static cycles_t exynos4_read_current_timer(void)
{
	BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
			 "cycles_t needs to move to 32-bit for ARM64 usage");
	return exynos4_read_count_32();
}
#endif
static int __init exynos4_clocksource_init(bool frc_shared)
{
	/*
	 * When the frc is shared, the main processor should have already
	 * turned it on and we shouldn't be writing to TCON.
	 */
	if (frc_shared)
		mct_frc.resume = NULL;
	else
		exynos4_mct_frc_start();

#if defined(CONFIG_ARM)
	exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
	exynos4_delay_timer.freq = clk_rate;
	register_current_timer_delay(&exynos4_delay_timer);
#endif

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);

	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);

	return 0;
}
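
/*
 * Global comparator 0 (G_COMP0) drives the "mct-comp" clock_event_device
 * registered further below. It matches against the free-running 64-bit
 * global counter, so arming an event means writing an absolute match value
 * (current count + delta) into COMP0_L/COMP0_U; for periodic mode the delta
 * is also written to COMP0_ADD_INCR so that the AUTO_INC bit lets the
 * hardware re-arm the comparator after each match.
 */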
static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
	unsigned int tcon;
	u64 comp_cycle;

	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);

	if (periodic) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_read_count_64() + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}
static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(false, cycles);

	return 0;
}

static int mct_set_state_shutdown(struct clock_event_device *evt)
{
	exynos4_mct_comp0_stop();
	return 0;
}
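
/*
 * Periodic mode programs one tick worth of comparator cycles. The
 * conversion used below is the usual clockevents one:
 *
 *	cycles_per_jiffy = ((NSEC_PER_SEC / HZ) * mult) >> shift
 *
 * i.e. the tick period in nanoseconds scaled through the device's mult/shift
 * pair. Purely as an illustration (the real numbers depend on the board's
 * fin_pll rate and HZ), a 24 MHz input clock with HZ=100 works out to about
 * 24000000 / 100 = 240000 cycles per tick.
 */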
static int mct_set_state_periodic(struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
			    >> evt->shift);
	exynos4_mct_comp0_stop();
	exynos4_mct_comp0_start(true, cycles_per_jiffy);
	return 0;
}

static struct clock_event_device mct_comp_device = {
	.name			= "mct-comp",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 250,
	.set_next_event		= exynos4_comp_set_next_event,
	.set_state_periodic	= mct_set_state_periodic,
	.set_state_shutdown	= mct_set_state_shutdown,
	.set_state_oneshot	= mct_set_state_shutdown,
	.set_state_oneshot_stopped = mct_set_state_shutdown,
	.tick_resume		= mct_set_state_shutdown,
};
static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static int exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr,
			IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq",
			&mct_comp_device))
		pr_err("%s: request_irq() failed\n", "mct_comp_irq");

	return 0;
}
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = readl_relaxed(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}
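
/*
 * Arming a local (per-CPU) tick: stop the timer first, load the interrupt
 * count buffer (bit 31 of ICNTB, the MCT_L_UPDATE_ICNTB bit, requests that
 * the written value actually be latched), enable the local interrupt, and
 * finally start the timer in interval mode through its TCON register.
 */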
static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	/* Clear the MCT tick interrupt */
	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}
static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	exynos4_mct_tick_start(cycles, mevt);
	return 0;
}

static int set_state_shutdown(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	exynos4_mct_tick_stop(mevt);
	exynos4_mct_tick_clear(mevt);
	return 0;
}

static int set_state_periodic(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned long cycles_per_jiffy;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
			    >> evt->shift);
	exynos4_mct_tick_stop(mevt);
	exynos4_mct_tick_start(cycles_per_jiffy, mevt);
	return 0;
}
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = &mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * The MCT would keep generating interrupts periodically
	 * unless it is stopped explicitly.
	 */
	if (!clockevent_state_periodic(&mevt->evt))
		exynos4_mct_tick_stop(mevt);

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
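
/*
 * CPU-hotplug "starting" callback: runs on every CPU as it comes online and
 * registers that CPU's local MCT as its tick device. The rate handed to
 * clockevents_config_and_register() is clk_rate / (TICK_BASE_CNT + 1)
 * because the local tick counter is preloaded with TICK_BASE_CNT, so each
 * interrupt-count period appears to span TICK_BASE_CNT + 1 input clocks.
 */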
static int exynos4_mct_starting_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_state_periodic = set_state_periodic;
	evt->set_state_shutdown = set_state_shutdown;
	evt->set_state_oneshot = set_state_shutdown;
	evt->set_state_oneshot_stopped = set_state_shutdown;
	evt->tick_resume = set_state_shutdown;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_PERCPU;
	evt->rating = MCT_CLKEVENTS_RATING;

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (evt->irq == -1)
			return -EIO;

		irq_force_affinity(evt->irq, cpumask_of(cpu));
		enable_irq(evt->irq);
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	return 0;
}
static int exynos4_mct_dying_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	evt->set_state_shutdown(evt);
	if (mct_int_type == MCT_INT_SPI) {
		if (evt->irq != -1)
			disable_irq_nosync(evt->irq);
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
	} else {
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
	}
	return 0;
}
static int __init exynos4_timer_resources(struct device_node *np)
{
	struct clk *mct_clk, *tick_clk;

	reg_base = of_iomap(np, 0);
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

	tick_clk = of_clk_get_by_name(np, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);

	mct_clk = of_clk_get_by_name(np, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	clk_prepare_enable(mct_clk);

	return 0;
}
/**
 * exynos4_timer_interrupts - initialize MCT interrupts
 * @np: device node for MCT
 * @int_type: interrupt type, MCT_INT_PPI or MCT_INT_SPI
 * @local_idx: array mapping CPU numbers to local timer indices
 * @nr_local: size of @local_idx array
 */
static int __init exynos4_timer_interrupts(struct device_node *np,
					   unsigned int int_type,
					   const u32 *local_idx,
					   size_t nr_local)
{
	int nr_irqs, i, err, cpu;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
	nr_irqs = of_irq_count(np);
	if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
		pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
			nr_irqs);
		nr_irqs = ARRAY_SIZE(mct_irqs);
	}
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	if (mct_int_type == MCT_INT_PPI) {
		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	} else {
		for_each_possible_cpu(cpu) {
			int mct_irq;
			unsigned int irq_idx;
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			if (cpu >= nr_local) {
				err = -EINVAL;
				goto out_irq;
			}

			irq_idx = MCT_L0_IRQ + local_idx[cpu];

			pcpu_mevt->evt.irq = -1;
			if (irq_idx >= ARRAY_SIZE(mct_irqs))
				break;
			mct_irq = mct_irqs[irq_idx];

			irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
			if (request_irq(mct_irq,
					exynos4_mct_tick_isr,
					IRQF_TIMER | IRQF_NOBALANCING,
					pcpu_mevt->name, pcpu_mevt)) {
				pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
				       cpu);
				continue;
			}
			pcpu_mevt->evt.irq = mct_irq;
		}
	}

	for_each_possible_cpu(cpu) {
		struct mct_clock_event_device *mevt = per_cpu_ptr(&percpu_mct_tick, cpu);

		if (cpu >= nr_local) {
			err = -EINVAL;
			goto out_irq;
		}

		mevt->base = EXYNOS4_MCT_L_BASE(local_idx[cpu]);
	}

	/* Install hotplug callbacks which configure the timer on this CPU */
	err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
				"clockevents/exynos4/mct_timer:starting",
				exynos4_mct_starting_cpu,
				exynos4_mct_dying_cpu);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	if (mct_int_type == MCT_INT_PPI) {
		free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
	} else {
		for_each_possible_cpu(cpu) {
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			if (pcpu_mevt->evt.irq != -1) {
				free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
				pcpu_mevt->evt.irq = -1;
			}
		}
	}
	return err;
}
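
/*
 * Device-tree glue. The optional "samsung,local-timers" property maps CPU
 * numbers to local timer indices (without it, CPU n simply uses local timer
 * n), while "samsung,frc-shared" indicates that the free-running counter is
 * owned by another (main) processor: in that case this instance neither
 * starts the FRC nor registers the global comparator clockevent.
 */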
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	bool frc_shared = of_property_read_bool(np, "samsung,frc-shared");
	u32 local_idx[MCT_NR_LOCAL] = {0};
	int nr_local;
	int ret;

	nr_local = of_property_count_u32_elems(np, "samsung,local-timers");
	if (nr_local == 0)
		return -EINVAL;
	if (nr_local > 0) {
		if (nr_local > ARRAY_SIZE(local_idx))
			return -EINVAL;

		ret = of_property_read_u32_array(np, "samsung,local-timers",
						 local_idx, nr_local);
		if (ret)
			return ret;
	} else {
		int i;

		nr_local = ARRAY_SIZE(local_idx);
		for (i = 0; i < nr_local; i++)
			local_idx[i] = i;
	}

	ret = exynos4_timer_resources(np);
	if (ret)
		return ret;

	ret = exynos4_timer_interrupts(np, int_type, local_idx, nr_local);
	if (ret)
		return ret;

	ret = exynos4_clocksource_init(frc_shared);
	if (ret)
		return ret;

	/*
	 * When the FRC is shared with a main processor, this secondary
	 * processor cannot use the global comparator.
	 */
	if (frc_shared)
		return ret;

	return exynos4_clockevent_init();
}
static int __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static int __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}

TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);
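
/*
 * For reference, a node matched by the compatibles above might look roughly
 * like the following. Addresses, interrupt specifiers and clock phandles are
 * illustrative only; the authoritative layout is the MCT DT binding. The
 * clock-names and compatible strings come straight from this driver.
 *
 *	timer@10050000 {
 *		compatible = "samsung,exynos4210-mct";
 *		reg = <0x10050000 0x800>;
 *		interrupts = <...>;
 *		clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MCT>;
 *		clock-names = "fin_pll", "mct";
 *	};
 */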