// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

enum sh_tmu_model {
        SH_TMU,
        SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
        struct sh_tmu_device *tmu;
        unsigned int index;

        void __iomem *base;
        int irq;

        unsigned long periodic;
        struct clock_event_device ced;
        struct clocksource cs;
        bool cs_enabled;
        unsigned int enable_count;
};

struct sh_tmu_device {
        struct platform_device *pdev;

        void __iomem *mapbase;
        struct clk *clk;
        unsigned long rate;

        enum sh_tmu_model model;

        raw_spinlock_t lock; /* Protect the shared start/stop register */

        struct sh_tmu_channel *channels;
        unsigned int num_channels;

        bool has_clockevent;
        bool has_clocksource;
};
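
/*
 * Register layout: TSTR is a single 8-bit start/stop register shared by all
 * channels in the block, while TCOR (timer constant/reload), TCNT (counter)
 * and TCR (control) exist once per channel. The negative TSTR index below
 * flags it as the shared register in sh_tmu_read()/sh_tmu_write().
 */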
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)
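
/*
 * Accessors: the shared TSTR register is 8-bit and sits at a model-dependent
 * offset from the block base (SH3-style parts at +2, later parts at +4). The
 * per-channel registers are spaced 4 bytes apart from the channel base; TCR
 * is accessed 16-bit wide, TCOR and TCNT 32-bit wide.
 */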
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
        unsigned long offs;

        if (reg_nr == TSTR) {
                switch (ch->tmu->model) {
                case SH_TMU_SH3:
                        return ioread8(ch->tmu->mapbase + 2);
                case SH_TMU:
                        return ioread8(ch->tmu->mapbase + 4);
                }
        }

        offs = reg_nr << 2;

        if (reg_nr == TCR)
                return ioread16(ch->base + offs);
        else
                return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
                                unsigned long value)
{
        unsigned long offs;

        if (reg_nr == TSTR) {
                switch (ch->tmu->model) {
                case SH_TMU_SH3:
                        return iowrite8(value, ch->tmu->mapbase + 2);
                case SH_TMU:
                        return iowrite8(value, ch->tmu->mapbase + 4);
                }
        }

        offs = reg_nr << 2;

        if (reg_nr == TCR)
                iowrite16(value, ch->base + offs);
        else
                iowrite32(value, ch->base + offs);
}
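
/*
 * Set or clear this channel's bit in the shared TSTR register under the
 * device lock, so that concurrent start/stop of sibling channels cannot
 * lose each other's updates in the read-modify-write sequence.
 */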
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
        unsigned long flags, value;

        /* start stop register shared by multiple timer channels */
        raw_spin_lock_irqsave(&ch->tmu->lock, flags);
        value = sh_tmu_read(ch, TSTR);

        if (start)
                value |= 1 << ch->index;
        else
                value &= ~(1 << ch->index);

        sh_tmu_write(ch, TSTR, value);
        raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}
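
/*
 * Channel enable/disable is reference counted: only the first enable powers
 * the clock up and programs the channel, and only the matching last disable
 * shuts it down again. The clocksource suspend/resume callbacks further down
 * manipulate the same counter directly.
 */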
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
        int ret;

        /* enable clock */
        ret = clk_enable(ch->tmu->clk);
        if (ret) {
                dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
                        ch->index);
                return ret;
        }

        /* make sure channel is disabled */
        sh_tmu_start_stop_ch(ch, 0);

        /* maximum timeout */
        sh_tmu_write(ch, TCOR, 0xffffffff);
        sh_tmu_write(ch, TCNT, 0xffffffff);

        /* configure channel to parent clock / 4, irq off */
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

        /* enable channel */
        sh_tmu_start_stop_ch(ch, 1);

        return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
        if (ch->enable_count++ > 0)
                return 0;

        pm_runtime_get_sync(&ch->tmu->pdev->dev);
        dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

        return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
        /* disable channel */
        sh_tmu_start_stop_ch(ch, 0);

        /* disable interrupts in TMU block */
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

        /* stop clock */
        clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
        if (WARN_ON(ch->enable_count == 0))
                return;

        if (--ch->enable_count > 0)
                return;

        __sh_tmu_disable(ch);

        dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
        pm_runtime_put(&ch->tmu->pdev->dev);
}
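
/*
 * (Re)program the channel: TCNT counts down from the programmed value and
 * raises an underflow interrupt when it wraps. For periodic mode the delta
 * is also written to TCOR so the hardware reloads it on every underflow;
 * for oneshot mode TCOR is parked at the maximum value instead.
 */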
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
                            int periodic)
{
        /* stop timer */
        sh_tmu_start_stop_ch(ch, 0);

        /* acknowledge interrupt */
        sh_tmu_read(ch, TCR);

        /* enable interrupt */
        sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

        /* reload delta value in case of periodic timer */
        if (periodic)
                sh_tmu_write(ch, TCOR, delta);
        else
                sh_tmu_write(ch, TCOR, 0xffffffff);

        sh_tmu_write(ch, TCNT, delta);

        /* start timer */
        sh_tmu_start_stop_ch(ch, 1);
}
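
/*
 * Underflow interrupt: in oneshot mode the interrupt is disabled again (the
 * event has fired and the channel keeps free-running), while in periodic
 * mode it is merely acknowledged by rewriting TCR with UNIE still set.
 */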
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
        struct sh_tmu_channel *ch = dev_id;

        /* disable or acknowledge interrupt */
        if (clockevent_state_oneshot(&ch->ced))
                sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
        else
                sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

        /* notify clockevent layer */
        ch->ced.event_handler(&ch->ced);
        return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
        return container_of(cs, struct sh_tmu_channel, cs);
}
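
/*
 * The clocksource core expects a counter that counts up, but TCNT counts
 * down, so the raw value is inverted (x ^ 0xffffffff is equivalent to
 * 0xffffffff - x for a 32-bit value).
 */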
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
        int ret;

        if (WARN_ON(ch->cs_enabled))
                return 0;

        ret = sh_tmu_enable(ch);
        if (!ret)
                ch->cs_enabled = true;

        return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (WARN_ON(!ch->cs_enabled))
                return;

        sh_tmu_disable(ch);
        ch->cs_enabled = false;
}
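
/*
 * Suspend/resume bypass sh_tmu_enable()/sh_tmu_disable() and adjust
 * enable_count directly: cs_enabled stays true across a suspend cycle,
 * and the generic power domain is notified instead of the runtime PM core.
 */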
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (!ch->cs_enabled)
                return;

        if (--ch->enable_count == 0) {
                __sh_tmu_disable(ch);
                dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
        }
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (!ch->cs_enabled)
                return;

        if (ch->enable_count++ == 0) {
                dev_pm_genpd_resume(&ch->tmu->pdev->dev);
                __sh_tmu_enable(ch);
        }
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
                                       const char *name)
{
        struct clocksource *cs = &ch->cs;

        cs->name = name;
        cs->rating = 200;
        cs->read = sh_tmu_clocksource_read;
        cs->enable = sh_tmu_clocksource_enable;
        cs->disable = sh_tmu_clocksource_disable;
        cs->suspend = sh_tmu_clocksource_suspend;
        cs->resume = sh_tmu_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(32);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

        dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
                 ch->index);

        clocksource_register_hz(cs, ch->tmu->rate);
        return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_tmu_channel, ced);
}
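
/*
 * For periodic mode the reload value is the timer rate divided by HZ,
 * rounded to the nearest integer (hence the + HZ/2 before the division),
 * so that one underflow occurs per tick.
 */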
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
        sh_tmu_enable(ch);

        if (periodic) {
                ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
                sh_tmu_set_next(ch, ch->periodic, 1);
        }
}

static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

        if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
                sh_tmu_disable(ch);
        return 0;
}

static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
                                        int periodic)
{
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

        /* deal with old setting first */
        if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
                sh_tmu_disable(ch);

        dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
                 ch->index, periodic ? "periodic" : "oneshot");
        sh_tmu_clock_event_start(ch, periodic);
        return 0;
}

static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
        return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
        return sh_tmu_clock_event_set_state(ced, 1);
}

static int sh_tmu_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

        BUG_ON(!clockevent_state_oneshot(ced));

        /* program new delta value */
        sh_tmu_set_next(ch, delta, 0);
        return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
        dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
        dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
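
/*
 * Register the channel as a clock event device. The minimum delta of 0x300
 * cycles presumably leaves headroom for reprogramming latency; the maximum
 * is the full 32-bit counter range.
 */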
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
                                       const char *name)
{
        struct clock_event_device *ced = &ch->ced;
        int ret;

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = 200;
        ced->cpumask = cpu_possible_mask;
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
        ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
        ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
        ced->suspend = sh_tmu_clock_event_suspend;
        ced->resume = sh_tmu_clock_event_resume;

        dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
                 ch->index);

        clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

        ret = request_irq(ch->irq, sh_tmu_interrupt,
                          IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
                          dev_name(&ch->tmu->pdev->dev), ch);
        if (ret) {
                dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
                        ch->index, ch->irq);
                return;
        }
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
                           bool clockevent, bool clocksource)
{
        if (clockevent) {
                ch->tmu->has_clockevent = true;
                sh_tmu_register_clockevent(ch, name);
        } else if (clocksource) {
                ch->tmu->has_clocksource = true;
                sh_tmu_register_clocksource(ch, name);
        }

        return 0;
}
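
/*
 * Per-channel register banks are 12 bytes apart. SH3-style devices start
 * them at offset 4 from the block base (right after the 8-bit TSTR at
 * offset 2); other devices start at offset 8 (TSTR lives at offset 4 there).
 */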
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
                                bool clockevent, bool clocksource,
                                struct sh_tmu_device *tmu)
{
        /* Skip unused channels. */
        if (!clockevent && !clocksource)
                return 0;

        ch->tmu = tmu;
        ch->index = index;

        if (tmu->model == SH_TMU_SH3)
                ch->base = tmu->mapbase + 4 + ch->index * 12;
        else
                ch->base = tmu->mapbase + 8 + ch->index * 12;

        ch->irq = platform_get_irq(tmu->pdev, index);
        if (ch->irq < 0)
                return ch->irq;

        ch->cs_enabled = false;
        ch->enable_count = 0;

        return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
                               clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
        struct resource *res;

        res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
                return -ENXIO;
        }

        tmu->mapbase = ioremap(res->start, resource_size(res));
        if (tmu->mapbase == NULL)
                return -ENXIO;

        return 0;
}
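
/*
 * DT configuration: the model defaults to SH_TMU and the channel count to
 * three; the optional "#renesas,channels" property may override the count,
 * but only two- and three-channel blocks are accepted.
 */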
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
        struct device_node *np = tmu->pdev->dev.of_node;

        tmu->model = SH_TMU;
        tmu->num_channels = 3;

        of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

        if (tmu->num_channels != 2 && tmu->num_channels != 3) {
                dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
                        tmu->num_channels);
                return -EINVAL;
        }

        return 0;
}
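
/*
 * Common setup: configuration comes either from the device tree or from
 * legacy platform data. The timer rate is the "fck" clock divided by 4,
 * matching the TCR_TPSC_CLK4 prescaler programmed at enable time.
 */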
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
        unsigned int i;
        int ret;

        tmu->pdev = pdev;

        raw_spin_lock_init(&tmu->lock);

        if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
                ret = sh_tmu_parse_dt(tmu);
                if (ret < 0)
                        return ret;
        } else if (pdev->dev.platform_data) {
                const struct platform_device_id *id = pdev->id_entry;
                struct sh_timer_config *cfg = pdev->dev.platform_data;

                tmu->model = id->driver_data;
                tmu->num_channels = hweight8(cfg->channels_mask);
        } else {
                dev_err(&tmu->pdev->dev, "missing platform data\n");
                return -ENXIO;
        }

        /* Get hold of clock. */
        tmu->clk = clk_get(&tmu->pdev->dev, "fck");
        if (IS_ERR(tmu->clk)) {
                dev_err(&tmu->pdev->dev, "cannot get clock\n");
                return PTR_ERR(tmu->clk);
        }

        ret = clk_prepare(tmu->clk);
        if (ret < 0)
                goto err_clk_put;

        /* Determine clock rate. */
        ret = clk_enable(tmu->clk);
        if (ret < 0)
                goto err_clk_unprepare;

        tmu->rate = clk_get_rate(tmu->clk) / 4;
        clk_disable(tmu->clk);

        /* Map the memory resource. */
        ret = sh_tmu_map_memory(tmu);
        if (ret < 0) {
                dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
                goto err_clk_unprepare;
        }

        /* Allocate and setup the channels. */
        tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
                                GFP_KERNEL);
        if (tmu->channels == NULL) {
                ret = -ENOMEM;
                goto err_unmap;
        }

        /*
         * Use the first channel as a clock event device and the second channel
         * as a clock source.
         */
        for (i = 0; i < tmu->num_channels; ++i) {
                ret = sh_tmu_channel_setup(&tmu->channels[i], i,
                                           i == 0, i == 1, tmu);
                if (ret < 0)
                        goto err_unmap;
        }

        platform_set_drvdata(pdev, tmu);

        return 0;

err_unmap:
        kfree(tmu->channels);
        iounmap(tmu->mapbase);
err_clk_unprepare:
        clk_unprepare(tmu->clk);
err_clk_put:
        clk_put(tmu->clk);
        return ret;
}
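
/*
 * On SuperH the driver can be probed twice for the same device: once as an
 * early platform device ("earlytimer") before the regular driver model is
 * up, and again later through the platform bus. The drvdata check below
 * detects the second probe and skips the already-completed setup.
 */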
static int sh_tmu_probe(struct platform_device *pdev)
{
        struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
        int ret;

        if (!is_sh_early_platform_device(pdev)) {
                pm_runtime_set_active(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
        }

        if (tmu) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
                goto out;
        }

        tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
        if (tmu == NULL)
                return -ENOMEM;

        ret = sh_tmu_setup(tmu, pdev);
        if (ret) {
                kfree(tmu);
                pm_runtime_idle(&pdev->dev);
                return ret;
        }

        if (is_sh_early_platform_device(pdev))
                return 0;

out:
        if (tmu->has_clockevent || tmu->has_clocksource)
                pm_runtime_irq_safe(&pdev->dev);
        else
                pm_runtime_idle(&pdev->dev);

        return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
        { "sh-tmu", SH_TMU },
        { "sh-tmu-sh3", SH_TMU_SH3 },
        { }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
        { .compatible = "renesas,tmu" },
        { }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
        .probe          = sh_tmu_probe,
        .remove         = sh_tmu_remove,
        .driver         = {
                .name   = "sh_tmu",
                .of_match_table = of_match_ptr(sh_tmu_of_table),
        },
        .id_table       = sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
        return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
        platform_driver_unregister(&sh_tmu_device_driver);
}

#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
#endif

subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");