  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * SuperH Timer Support - CMT
  4. *
  5. * Copyright (C) 2008 Magnus Damm
  6. */
  7. #include <linux/clk.h>
  8. #include <linux/clockchips.h>
  9. #include <linux/clocksource.h>
  10. #include <linux/delay.h>
  11. #include <linux/err.h>
  12. #include <linux/init.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/io.h>
  15. #include <linux/iopoll.h>
  16. #include <linux/ioport.h>
  17. #include <linux/irq.h>
  18. #include <linux/module.h>
  19. #include <linux/of.h>
  20. #include <linux/of_device.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/pm_domain.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/sh_timer.h>
  25. #include <linux/slab.h>
  26. #include <linux/spinlock.h>
  27. #ifdef CONFIG_SUPERH
  28. #include <asm/platform_early.h>
  29. #endif
  30. struct sh_cmt_device;
  31. /*
  32. * The CMT comes in 5 different identified flavours, depending not only on the
  33. * SoC but also on the particular instance. The following table lists the main
  34. * characteristics of those flavours.
  35. *
  36. * 16B 32B 32B-F 48B R-Car Gen2
  37. * -----------------------------------------------------------------------------
  38. * Channels 2 1/4 1 6 2/8
  39. * Control Width 16 16 16 16 32
  40. * Counter Width 16 32 32 32/48 32/48
  41. * Shared Start/Stop Y Y Y Y N
  42. *
  43. * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
  44. * located in the channel registers block. All other versions have a shared
  45. * start/stop register located in the global space.
  46. *
  47. * Channels are indexed from 0 to N-1 in the documentation. The channel index
  48. * infers the start/stop bit position in the control register and the channel
  49. * registers block address. Some CMT instances have a subset of channels
  50. * available, in which case the index in the documentation doesn't match the
  51. * "real" index as implemented in hardware. This is for instance the case with
  52. * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
  53. * in the documentation but using start/stop bit 5 and having its registers
  54. * block at 0x60.
  55. *
  56. * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
  57. * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
  58. */
/*
 * Identifies which of the five CMT hardware flavours a device is; used as
 * the index into the sh_cmt_info[] descriptor table below.
 */
enum sh_cmt_model {
	SH_CMT_16BIT,
	SH_CMT_32BIT,
	SH_CMT_48BIT,
	SH_CMT0_RCAR_GEN2,
	SH_CMT1_RCAR_GEN2,
};
/*
 * Static per-flavour description of a CMT variant: register width, the
 * CMCSR bits to test and clear, and accessors matching the access width
 * of the control and counter registers of that variant.
 */
struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned int channels_mask;	/* bitmask of channels present in hardware */
	unsigned long width; /* 16 or 32 bit version of hardware block */
	u32 overflow_bit;	/* CMCSR flag set on compare match */
	u32 clear_bits;		/* mask ANDed into CMCSR to ack an interrupt */

	/* callbacks for CMSTR and CMCSR access */
	u32 (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      u32 value);

	/* callbacks for CMCNT and CMCOR access */
	u32 (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
/*
 * Per-channel state, embedding the clock_event_device and clocksource
 * instances the channel may be registered as.
 */
struct sh_cmt_channel {
	struct sh_cmt_device *cmt;

	unsigned int index;	/* Index in the documentation */
	unsigned int hwidx;	/* Real hardware index */

	void __iomem *iostart;	/* per-channel start/stop block (R-Car Gen2 only) */
	void __iomem *ioctrl;	/* channel control register block */

	unsigned int timer_bit;	/* start/stop bit position in CMSTR */
	unsigned long flags;	/* FLAG_* private flags, see below */
	u32 match_value;	/* match value currently programmed in CMCOR */
	u32 next_match_value;	/* match value to program next */
	u32 max_match_value;	/* largest value the counter can reach */
	raw_spinlock_t lock;	/* serializes flag/match updates and programming */
	struct clock_event_device ced;
	struct clocksource cs;
	u64 total_cycles;	/* cycles accumulated over counter wraps */
	bool cs_enabled;	/* clocksource side currently enabled */
};
/* Per-device state covering all channels of one CMT instance. */
struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;	/* flavour descriptor */

	void __iomem *mapbase;	/* ioremapped register window */
	struct clk *clk;	/* functional clock feeding the timer */
	unsigned long rate;	/* timer input rate in Hz */
	unsigned int reg_delay;	/* settling delay after register writes, in us */

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_cmt_channel *channels;
	unsigned int num_channels;	/* entries in channels[] */
	unsigned int hw_channels;	/* NOTE(review): appears to be the hardware channel mask — confirm against setup code */

	bool has_clockevent;
	bool has_clocksource;
};
/* CMCSR bits of the 16-bit control register variants */
#define SH_CMT16_CMCSR_CMF		(1 << 7)	/* compare match flag */
#define SH_CMT16_CMCSR_CMIE		(1 << 6)	/* compare match interrupt enable */
#define SH_CMT16_CMCSR_CKS8		(0 << 0)	/* clock select values (prescaler per name) */
#define SH_CMT16_CMCSR_CKS32		(1 << 0)
#define SH_CMT16_CMCSR_CKS128		(2 << 0)
#define SH_CMT16_CMCSR_CKS512		(3 << 0)
#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0)

/* CMCSR bits of the 32-bit control register variants */
#define SH_CMT32_CMCSR_CMF		(1 << 15)	/* compare match flag */
#define SH_CMT32_CMCSR_OVF		(1 << 14)	/* overflow flag */
#define SH_CMT32_CMCSR_WRFLG		(1 << 13)	/* CMCNT write still pending */
#define SH_CMT32_CMCSR_STTF		(1 << 12)
#define SH_CMT32_CMCSR_STPF		(1 << 11)
#define SH_CMT32_CMCSR_SSIE		(1 << 10)
#define SH_CMT32_CMCSR_CMS		(1 << 9)
#define SH_CMT32_CMCSR_CMM		(1 << 8)	/* compare match mode, set for periodic operation */
#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4)	/* compare match request selection */
#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4)
#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4)
#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4)
#define SH_CMT32_CMCSR_DBGIVD		(1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0)	/* clock select values */
#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)
  137. static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
  138. {
  139. return ioread16(base + (offs << 1));
  140. }
  141. static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
  142. {
  143. return ioread32(base + (offs << 2));
  144. }
  145. static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
  146. {
  147. iowrite16(value, base + (offs << 1));
  148. }
  149. static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
  150. {
  151. iowrite32(value, base + (offs << 2));
  152. }
/*
 * Descriptor table, indexed by enum sh_cmt_model. The older flavours use
 * 16-bit control register accessors; the 48-bit and R-Car Gen2 flavours
 * use 32-bit accessors throughout.
 */
static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		/* 16-bit control registers, 32-bit counter registers */
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.channels_mask = 0x3f,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT0_RCAR_GEN2] = {
		.model = SH_CMT0_RCAR_GEN2,
		.channels_mask = 0x60,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT1_RCAR_GEN2] = {
		.model = SH_CMT1_RCAR_GEN2,
		.channels_mask = 0xff,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};
/*
 * Channel register word offsets, scaled by the width-specific accessors.
 * CMCLKE is a byte offset added directly to the device base address.
 */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

#define CMCLKE	0x1000	/* CLK Enable Register (R-Car Gen2) */
  212. static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
  213. {
  214. if (ch->iostart)
  215. return ch->cmt->info->read_control(ch->iostart, 0);
  216. else
  217. return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
  218. }
  219. static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
  220. {
  221. u32 old_value = sh_cmt_read_cmstr(ch);
  222. if (value != old_value) {
  223. if (ch->iostart) {
  224. ch->cmt->info->write_control(ch->iostart, 0, value);
  225. udelay(ch->cmt->reg_delay);
  226. } else {
  227. ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
  228. udelay(ch->cmt->reg_delay);
  229. }
  230. }
  231. }
  232. static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
  233. {
  234. return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
  235. }
  236. static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
  237. {
  238. u32 old_value = sh_cmt_read_cmcsr(ch);
  239. if (value != old_value) {
  240. ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
  241. udelay(ch->cmt->reg_delay);
  242. }
  243. }
  244. static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
  245. {
  246. return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
  247. }
/*
 * Write the counter register. On the 32-bit variants a previous CMCNT
 * write may still be pending (WRFLG set); poll it away first. Returns 0
 * on success or a negative error code when the flag never clears.
 */
static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
	/* Tests showed that we need to wait 3 clocks here */
	unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2);
	u32 reg;

	if (ch->cmt->info->model > SH_CMT_16BIT) {
		/* All models above SH_CMT_16BIT have the WRFLG status bit. */
		int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg,
						   !(reg & SH_CMT32_CMCSR_WRFLG),
						   1, cmcnt_delay, false, ch);
		if (ret < 0)
			return ret;
	}

	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
	/* Let the counter write settle before the caller reads it back. */
	udelay(cmcnt_delay);
	return 0;
}
  264. static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
  265. {
  266. u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR);
  267. if (value != old_value) {
  268. ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
  269. udelay(ch->cmt->reg_delay);
  270. }
  271. }
/*
 * Read a stable counter value together with the overflow flag. The
 * counter is sampled three times and accepted only when the samples are
 * monotonic and the overflow flag did not change across them, so callers
 * never see a value torn by a concurrent wrap.
 */
static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
	u32 v1, v2, v3;
	u32 o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}
  289. static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
  290. {
  291. unsigned long flags;
  292. u32 value;
  293. /* start stop register shared by multiple timer channels */
  294. raw_spin_lock_irqsave(&ch->cmt->lock, flags);
  295. value = sh_cmt_read_cmstr(ch);
  296. if (start)
  297. value |= 1 << ch->timer_bit;
  298. else
  299. value &= ~(1 << ch->timer_bit);
  300. sh_cmt_write_cmstr(ch, value);
  301. raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
  302. }
/*
 * Power up and start a channel: enable its clock, program CMCSR for the
 * hardware flavour, clear the counter and start counting. Returns 0 on
 * success or a negative error code.
 */
static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
	int ret;

	/* Mark the device as a syscore device for system PM handling. */
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		/* Only the pre-Gen2 32-bit models set CMTOUT_IE. */
		u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
			      SH_CMT32_CMCSR_CMTOUT_IE : 0;
		sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	ret = sh_cmt_write_cmcnt(ch, 0);

	/* A non-zero read-back means the counter failed to clear in time. */
	if (ret || sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;

 err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

 err0:
	return ret;
}
/* Stop a channel and release its clock; reverse of sh_cmt_enable(). */
static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	/* Drop the syscore designation taken in sh_cmt_enable(). */
	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
}
/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)	/* channel in use as a clock event device */
#define FLAG_CLOCKSOURCE (1 << 1)	/* channel in use as a clocksource */
#define FLAG_REPROGRAM (1 << 2)		/* hardware must be reprogrammed */
#define FLAG_SKIPEVENT (1 << 3)		/* suppress the next event delivery */
#define FLAG_IRQCONTEXT (1 << 4)	/* set while inside sh_cmt_interrupt() */
/*
 * Program ch->next_match_value into the hardware (relative to the current
 * counter, or to zero when @absolute) and verify afterwards that the
 * event was not already missed. When the match was programmed too close
 * to the counter, retry with an exponentially growing safety delay.
 */
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	u32 value = ch->next_match_value;
	u32 new_match;
	u32 delay = 0;
	u32 now = 0;
	u32 has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 * -> let the interrupt handler reprogram the timer.
		 * -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		/* delay wrapped around to zero: retries exhausted */
		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}
/*
 * Program a new event delta. Caller must hold ch->lock. An out-of-range
 * delta is clamped by the program/verify logic; warn so callers get fixed.
 */
static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}
  442. static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
  443. {
  444. unsigned long flags;
  445. raw_spin_lock_irqsave(&ch->lock, flags);
  446. __sh_cmt_set_next(ch, delta);
  447. raw_spin_unlock_irqrestore(&ch->lock, flags);
  448. }
/*
 * Channel interrupt handler: acknowledge the compare match, accumulate
 * clocksource cycles, deliver the clockevent and reprogram the hardware
 * when needed. FLAG_IRQCONTEXT lets a set_next_event call made from the
 * event handler defer hardware programming back to this function.
 */
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (clockevent_state_oneshot(&ch->ced)) {
				/* oneshot: re-arm with max timeout unless the
				 * event handler programs a new delta below.
				 */
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((clockevent_state_shutdown(&ch->ced))
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}
/*
 * Acquire the channel for clockevent and/or clocksource use (@flag is
 * FLAG_CLOCKEVENT or FLAG_CLOCKSOURCE). The hardware is enabled only for
 * the first user. Returns 0 on success or a negative error code.
 */
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	if (flag & FLAG_CLOCKSOURCE)
		pm_runtime_get_sync(&ch->cmt->pdev->dev);

	raw_spin_lock_irqsave(&ch->lock, flags);

	/* First user powers the channel up. */
	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
		if (flag & FLAG_CLOCKEVENT)
			pm_runtime_get_sync(&ch->cmt->pdev->dev);
		ret = sh_cmt_enable(ch);
	}

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if (ch->cmt->num_channels == 1 &&
	    flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}
/*
 * Release the channel from clockevent or clocksource use; the hardware is
 * powered down only when the last user goes away.
 */
static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);
	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	/* Disable only on the transition from "in use" to "unused". */
	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
		sh_cmt_disable(ch);
		if (flag & FLAG_CLOCKEVENT)
			pm_runtime_put(&ch->cmt->pdev->dev);
	}

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);

	if (flag & FLAG_CLOCKSOURCE)
		pm_runtime_put(&ch->cmt->pdev->dev);
}
/* Map a clocksource back to its embedding channel. */
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}
/*
 * Clocksource read callback. With a single channel the hardware counter
 * is extended to 64 bits using total_cycles, which the interrupt handler
 * advances on every compare match; otherwise the raw counter is returned.
 */
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	u32 has_wrapped;

	if (ch->cmt->num_channels == 1) {
		unsigned long flags;
		u64 value;
		u32 raw;

		raw_spin_lock_irqsave(&ch->lock, flags);
		value = ch->total_cycles;
		raw = sh_cmt_get_counter(ch, &has_wrapped);

		/* Account for a wrap not yet folded into total_cycles. */
		if (unlikely(has_wrapped))
			raw += ch->match_value + 1;
		raw_spin_unlock_irqrestore(&ch->lock, flags);

		return value + raw;
	}

	return sh_cmt_get_counter(ch, &has_wrapped);
}
  549. static int sh_cmt_clocksource_enable(struct clocksource *cs)
  550. {
  551. int ret;
  552. struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
  553. WARN_ON(ch->cs_enabled);
  554. ch->total_cycles = 0;
  555. ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
  556. if (!ret)
  557. ch->cs_enabled = true;
  558. return ret;
  559. }
  560. static void sh_cmt_clocksource_disable(struct clocksource *cs)
  561. {
  562. struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
  563. WARN_ON(!ch->cs_enabled);
  564. sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
  565. ch->cs_enabled = false;
  566. }
/* Clocksource suspend callback: stop the channel, then quiesce its PM domain. */
static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	/* Nothing to do when the clocksource was never enabled. */
	if (!ch->cs_enabled)
		return;

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}
/* Clocksource resume callback: restore in reverse order of suspend. */
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
  583. static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
  584. const char *name)
  585. {
  586. struct clocksource *cs = &ch->cs;
  587. cs->name = name;
  588. cs->rating = 125;
  589. cs->read = sh_cmt_clocksource_read;
  590. cs->enable = sh_cmt_clocksource_enable;
  591. cs->disable = sh_cmt_clocksource_disable;
  592. cs->suspend = sh_cmt_clocksource_suspend;
  593. cs->resume = sh_cmt_clocksource_resume;
  594. cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
  595. cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
  596. dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
  597. ch->index);
  598. clocksource_register_hz(cs, ch->cmt->rate);
  599. return 0;
  600. }
/* Map a clock_event_device back to its embedding channel. */
static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}
  605. static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
  606. {
  607. sh_cmt_start(ch, FLAG_CLOCKEVENT);
  608. if (periodic)
  609. sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
  610. else
  611. sh_cmt_set_next(ch, ch->max_match_value);
  612. }
  613. static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
  614. {
  615. struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
  616. sh_cmt_stop(ch, FLAG_CLOCKEVENT);
  617. return 0;
  618. }
  619. static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
  620. int periodic)
  621. {
  622. struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
  623. /* deal with old setting first */
  624. if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
  625. sh_cmt_stop(ch, FLAG_CLOCKEVENT);
  626. dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
  627. ch->index, periodic ? "periodic" : "oneshot");
  628. sh_cmt_clock_event_start(ch, periodic);
  629. return 0;
  630. }
/* Clockevent set_state_oneshot callback. */
static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 0);
}
/* Clockevent set_state_periodic callback. */
static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 1);
}
/*
 * Clockevent set_next_event callback. When invoked from our own interrupt
 * handler (FLAG_IRQCONTEXT set), only record the new match value and let
 * the handler program the hardware on its way out; otherwise program it
 * immediately under the channel lock.
 */
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(!clockevent_state_oneshot(ced));
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}
/* Clockevent suspend callback: quiesce the PM domain, then unprepare the clock. */
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}
/* Clockevent resume callback: reverse order of sh_cmt_clock_event_suspend(). */
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
}
/*
 * Register the channel as a clock event device, requesting its interrupt
 * first. Returns 0 on success or a negative error code.
 */
static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				      const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int irq;
	int ret;

	irq = platform_get_irq(ch->cmt->pdev, ch->index);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 125;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
	ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
	ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	/* TODO: calculate good shift from rate and counter bit width */
	ced->shift = 32;
	ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->max_delta_ticks = ch->max_match_value;
	/* NOTE(review): 0x1f minimum delta looks empirical — confirm */
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
	ced->min_delta_ticks = 0x1f;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	return 0;
}
/*
 * Register the channel in the requested roles. A clockevent registration
 * failure is fatal; clocksource registration failure is not propagated.
 */
static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	int ret;

	if (clockevent) {
		ch->cmt->has_clockevent = true;
		ret = sh_cmt_register_clockevent(ch, name);
		if (ret < 0)
			return ret;
	}

	if (clocksource) {
		ch->cmt->has_clocksource = true;
		sh_cmt_register_clocksource(ch, name);
	}

	return 0;
}
  718. static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
  719. unsigned int hwidx, bool clockevent,
  720. bool clocksource, struct sh_cmt_device *cmt)
  721. {
  722. u32 value;
  723. int ret;
  724. /* Skip unused channels. */
  725. if (!clockevent && !clocksource)
  726. return 0;
  727. ch->cmt = cmt;
  728. ch->index = index;
  729. ch->hwidx = hwidx;
  730. ch->timer_bit = hwidx;
  731. /*
  732. * Compute the address of the channel control register block. For the
  733. * timers with a per-channel start/stop register, compute its address
  734. * as well.
  735. */
  736. switch (cmt->info->model) {
  737. case SH_CMT_16BIT:
  738. ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
  739. break;
  740. case SH_CMT_32BIT:
  741. case SH_CMT_48BIT:
  742. ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
  743. break;
  744. case SH_CMT0_RCAR_GEN2:
  745. case SH_CMT1_RCAR_GEN2:
  746. ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
  747. ch->ioctrl = ch->iostart + 0x10;
  748. ch->timer_bit = 0;
  749. /* Enable the clock supply to the channel */
  750. value = ioread32(cmt->mapbase + CMCLKE);
  751. value |= BIT(hwidx);
  752. iowrite32(value, cmt->mapbase + CMCLKE);
  753. break;
  754. }
  755. if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
  756. ch->max_match_value = ~0;
  757. else
  758. ch->max_match_value = (1 << cmt->info->width) - 1;
  759. ch->match_value = ch->max_match_value;
  760. raw_spin_lock_init(&ch->lock);
  761. ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
  762. clockevent, clocksource);
  763. if (ret) {
  764. dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
  765. ch->index);
  766. return ret;
  767. }
  768. ch->cs_enabled = false;
  769. return 0;
  770. }
  771. static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
  772. {
  773. struct resource *mem;
  774. mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
  775. if (!mem) {
  776. dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
  777. return -ENXIO;
  778. }
  779. cmt->mapbase = ioremap(mem->start, resource_size(mem));
  780. if (cmt->mapbase == NULL) {
  781. dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
  782. return -ENXIO;
  783. }
  784. return 0;
  785. }
/*
 * Legacy (non-DT) platform device name to timer-variant mapping; the
 * variant descriptor is carried in driver_data.
 */
static const struct platform_device_id sh_cmt_id_table[] = {
	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
/*
 * Device-tree compatible strings, each mapped to the sh_cmt_info entry
 * describing that timer variant. The R-Car Gen3/Gen4 strings reuse the
 * Gen2 descriptors.
 */
static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
	{
		/* deprecated, preserved for backward compatibility */
		.compatible = "renesas,cmt-48",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		/* deprecated, preserved for backward compatibility */
		.compatible = "renesas,cmt-48-gen2",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,r8a7740-cmt1",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		.compatible = "renesas,sh73a0-cmt1",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		.compatible = "renesas,rcar-gen2-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen2-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen3-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen3-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen4-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen4-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
/*
 * One-time device setup: identify the timer variant, acquire and measure
 * the functional clock, map the registers, and register one clock event
 * device and one clock source on the available channels.
 *
 * Returns 0 on success or a negative error code; on failure every
 * resource acquired so far is released via the goto-cleanup chain.
 */
static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
	unsigned int mask, i;
	unsigned long rate;
	int ret;

	cmt->pdev = pdev;
	raw_spin_lock_init(&cmt->lock);

	/*
	 * Pick up the hardware description: OF match data when probed via
	 * DT, otherwise legacy platform data paired with the id_entry.
	 */
	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		cmt->info = of_device_get_match_data(&pdev->dev);
		cmt->hw_channels = cmt->info->channels_mask;
	} else if (pdev->dev.platform_data) {
		struct sh_timer_config *cfg = pdev->dev.platform_data;
		const struct platform_device_id *id = pdev->id_entry;

		cmt->info = (const struct sh_cmt_info *)id->driver_data;
		cmt->hw_channels = cfg->channels_mask;
	} else {
		dev_err(&cmt->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	cmt->clk = clk_get(&cmt->pdev->dev, "fck");
	if (IS_ERR(cmt->clk)) {
		dev_err(&cmt->pdev->dev, "cannot get clock\n");
		return PTR_ERR(cmt->clk);
	}

	ret = clk_prepare(cmt->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate. */
	ret = clk_enable(cmt->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	rate = clk_get_rate(cmt->clk);
	if (!rate) {
		ret = -EINVAL;
		goto err_clk_disable;
	}

	/* We shall wait 2 input clks after register writes */
	if (cmt->info->model >= SH_CMT_48BIT)
		cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate);
	/* Effective counting rate: input clock divided per timer width. */
	cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8);

	/* Map the memory resource(s). */
	ret = sh_cmt_map_memory(cmt);
	if (ret < 0)
		goto err_clk_disable;

	/* Allocate and setup the channels. */
	cmt->num_channels = hweight8(cmt->hw_channels);
	cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
				GFP_KERNEL);
	if (cmt->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source. If only one channel is available use it for both.
	 */
	for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
		/* Lowest set bit in the mask is the next hardware channel. */
		unsigned int hwidx = ffs(mask) - 1;
		bool clocksource = i == 1 || cmt->num_channels == 1;
		bool clockevent = i == 0;

		ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
					   clockevent, clocksource, cmt);
		if (ret < 0)
			goto err_unmap;

		mask &= ~(1 << hwidx);
	}

	/* Rate is known; keep the clock prepared but disabled until use. */
	clk_disable(cmt->clk);

	platform_set_drvdata(pdev, cmt);

	return 0;

err_unmap:
	kfree(cmt->channels);
	iounmap(cmt->mapbase);
err_clk_disable:
	clk_disable(cmt->clk);
err_clk_unprepare:
	clk_unprepare(cmt->clk);
err_clk_put:
	clk_put(cmt->clk);
	return ret;
}
/*
 * Platform driver probe. On SuperH this device may already have been set
 * up through the early-platform ("earlytimer") path, in which case drvdata
 * is non-NULL and setup is skipped; otherwise allocate and initialize the
 * device here.
 */
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	int ret;

	/* Runtime PM is only usable once we're past the early-boot path. */
	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (cmt) {
		/* Already initialized by the earlytimer pass. */
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL)
		return -ENOMEM;

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	/*
	 * Timers used as clockevent/clocksource must service interrupts
	 * with runtime PM held; otherwise let the device idle.
	 */
	if (cmt->has_clockevent || cmt->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
/*
 * Removal is refused: registered clockevent/clocksource devices cannot be
 * torn down once the system is using them.
 */
static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
/* Platform driver glue: matches by OF table or legacy platform device id. */
static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
		.of_match_table = of_match_ptr(sh_cmt_of_table),
	},
	.id_table	= sh_cmt_id_table,
};
/* Module init: register the platform driver. */
static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}
#ifdef CONFIG_SUPERH
/* Allow use as a boot-time "earlytimer" on SuperH, before initcalls run. */
sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
#endif

/* Register at subsys level so the timer is available early in boot. */
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");