/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

enum sh_tmu_model {
	SH_TMU_LEGACY,
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)

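/*
 * Register layout, as used by sh_tmu_read()/sh_tmu_write() below: the 8-bit
 * TSTR start/stop register is shared by all channels and sits at a
 * model-dependent offset from the device base (+0 legacy, +2 SH3, +4
 * otherwise), while each channel has its own TCOR/TCNT/TCR block in which
 * registers are spaced 4 bytes apart (TCR is accessed as a 16-bit register,
 * TCOR and TCNT as 32-bit registers).
 */
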
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_LEGACY:
			return ioread8(ch->tmu->mapbase);
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_LEGACY:
			return iowrite8(value, ch->tmu->mapbase);
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

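/*
 * Note on counter direction: TCNT counts down and sets the underflow (UNF)
 * status bit in TCR when it wraps; with TCR_UNIE set this raises the channel
 * interrupt. On underflow the counter reloads from TCOR, so the periodic case
 * keeps firing every 'delta' ticks, while the oneshot case parks TCOR at the
 * maximum value.
 */
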
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	/* TCNT is a down-counter; invert it to present an up-counting value */
	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

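/*
 * The suspend/resume callbacks adjust enable_count and call the __sh_tmu
 * helpers directly instead of going through sh_tmu_enable()/sh_tmu_disable():
 * the device is marked as a syscore device while in use, so these run late in
 * system suspend where runtime PM is not usable, and the generic power domain
 * is handled through the pm_genpd_syscore helpers instead.
 */
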
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

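/*
 * For periodic mode the reload value is the channel rate divided by HZ,
 * rounded to the nearest integer by the (rate + HZ/2) / HZ expression,
 * i.e. the number of timer ticks per jiffy.
 */
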
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	/*
	 * IRQF_TIMER marks this as a timer interrupt; IRQF_IRQPOLL and
	 * IRQF_NOBALANCING keep it usable for irq polling and exempt from
	 * irq balancing.
	 */
	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;

	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

		/*
		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
		 * channel register blocks at base + 2 + 12 * index, while all
		 * other variants map them at base + 4 + 12 * index. We can
		 * compute the index by just dividing by 12, the 2 bytes or 4
		 * bytes offset being hidden by the integer division. For
		 * instance, offsets 2, 14 and 26 (SH3) and offsets 4, 16 and
		 * 28 (other variants) all yield indices 0, 1 and 2.
		 */
		ch->index = cfg->channel_offset / 12;
		ch->base = tmu->mapbase + cfg->channel_offset;
	} else {
		ch->index = index;

		if (tmu->model == SH_TMU_SH3)
			ch->base = tmu->mapbase + 4 + ch->index * 12;
		else
			ch->base = tmu->mapbase + 8 + ch->index * 12;
	}

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	/*
	 * In legacy platform device configuration (with one device per channel)
	 * the resource points to the channel base address. Rewind to the
	 * device base so that the channel offset arithmetic used elsewhere in
	 * the driver stays uniform.
	 */
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase -= cfg->channel_offset;
	}

	return 0;
}

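/*
 * Undo the legacy base address adjustment before unmapping, so that iounmap()
 * receives the pointer originally returned by ioremap_nocache().
 */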
static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
{
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase += cfg->channel_offset;
	}

	iounmap(tmu->mapbase);
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	const struct platform_device_id *id = pdev->id_entry;
	unsigned int i;
	int ret;

	if (!cfg) {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	tmu->pdev = pdev;
	tmu->model = id->driver_data;

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev,
			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	if (tmu->model == SH_TMU_LEGACY)
		tmu->num_channels = 1;
	else
		tmu->num_channels = hweight8(cfg->channels_mask);

	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	if (tmu->model == SH_TMU_LEGACY) {
		ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
					   cfg->clockevent_rating != 0,
					   cfg->clocksource_rating != 0, tmu);
		if (ret < 0)
			goto err_unmap;
	} else {
		/*
		 * Use the first channel as a clock event device and the second
		 * channel as a clock source.
		 */
		for (i = 0; i < tmu->num_channels; ++i) {
			ret = sh_tmu_channel_setup(&tmu->channels[i], i,
						   i == 0, i == 1, tmu);
			if (ret < 0)
				goto err_unmap;
		}
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	sh_tmu_unmap_memory(tmu);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

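/*
 * Note that probe can run twice for the same device: once through the
 * "earlytimer" early platform mechanism before regular initcalls, and again
 * at subsys_initcall time. A non-NULL drvdata on the second pass means the
 * device was already set up early, so only the runtime PM bookkeeping is
 * redone ("kept as earlytimer").
 */
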
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh_tmu", SH_TMU_LEGACY },
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");