/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);

	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
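
/*
 * TCNT is a down-counter reloaded from TCOR (0xffffffff in free-running
 * mode), so invert the raw value to give the clocksource layer a count that
 * increases over time.
 */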
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_update_freq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
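
/*
 * The timer rate is only known once the channel clock has been enabled, so
 * the clock event device is (re)configured here. For periodic mode the
 * reload value is the rate divided by HZ, rounded to the nearest cycle.
 */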
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
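
/*
 * The clock event device is registered with a dummy rate of 1 Hz and a
 * minimum delta of 0x300 timer cycles; the real rate is programmed through
 * clockevents_config() when the channel is first started.
 */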
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
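
/*
 * Channel register banks are 12 bytes apart and follow the shared TSTR
 * register: they start at offset 4 on SH3-style TMUs and at offset 8 on the
 * other variants.
 */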
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
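
/*
 * The driver can be probed twice: first as an early platform device
 * ("earlytimer") and later as a regular platform device. The drvdata set by
 * the first pass is used to detect and skip the second setup.
 */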
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");