sh_mtu2.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * SuperH Timer Support - MTU2
  4. *
  5. * Copyright (C) 2009 Magnus Damm
  6. */
  7. #include <linux/clk.h>
  8. #include <linux/clockchips.h>
  9. #include <linux/delay.h>
  10. #include <linux/err.h>
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/io.h>
  14. #include <linux/ioport.h>
  15. #include <linux/irq.h>
  16. #include <linux/module.h>
  17. #include <linux/of.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/pm_domain.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/sh_timer.h>
  22. #include <linux/slab.h>
  23. #include <linux/spinlock.h>
struct sh_mtu2_device;

/*
 * Per-channel state: one MTU2 timer channel exposed as a clockevent.
 */
struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;	/* owning device instance */
	unsigned int index;		/* hardware channel number */
	void __iomem *base;		/* channel register window */
	struct clock_event_device ced;	/* clockevent registered for this channel */
};

/*
 * Per-device state shared by all channels of one MTU2 unit.
 */
struct sh_mtu2_device {
	struct platform_device *pdev;
	void __iomem *mapbase;		/* whole-unit register window */
	struct clk *clk;		/* "fck" functional clock */
	raw_spinlock_t lock; /* Protect the shared registers */
	struct sh_mtu2_channel *channels;	/* array of num_channels entries */
	unsigned int num_channels;
	bool has_clockevent;		/* true once any channel registered a clockevent */
};
/*
 * Register index tokens passed to sh_mtu2_read()/sh_mtu2_write().
 * TSTR is negative to mark it as the device-wide register; the others
 * index mtu2_reg_offs[] for the per-channel byte offset.
 */
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR 4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR 6 /* channel register */

/* TCR: counter clear source (CCLR field, bits 7:5) */
#define TCR_CCLR_NONE (0 << 5)
#define TCR_CCLR_TGRA (1 << 5)
#define TCR_CCLR_TGRB (2 << 5)
#define TCR_CCLR_SYNC (3 << 5)
#define TCR_CCLR_TGRC (5 << 5)
#define TCR_CCLR_TGRD (6 << 5)
#define TCR_CCLR_MASK (7 << 5)
/* TCR: clock edge selection (CKEG field, bits 4:3) */
#define TCR_CKEG_RISING (0 << 3)
#define TCR_CKEG_FALLING (1 << 3)
#define TCR_CKEG_BOTH (2 << 3)
#define TCR_CKEG_MASK (3 << 3)
/* TCR: timer prescaler (TPSC field, bits 2:0) */
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1 (0 << 0)
#define TCR_TPSC_P4 (1 << 0)
#define TCR_TPSC_P16 (2 << 0)
#define TCR_TPSC_P64 (3 << 0)
#define TCR_TPSC_CH0_TCLKA (4 << 0)
#define TCR_TPSC_CH0_TCLKB (5 << 0)
#define TCR_TPSC_CH0_TCLKC (6 << 0)
#define TCR_TPSC_CH0_TCLKD (7 << 0)
#define TCR_TPSC_CH1_TCLKA (4 << 0)
#define TCR_TPSC_CH1_TCLKB (5 << 0)
#define TCR_TPSC_CH1_P256 (6 << 0)
#define TCR_TPSC_CH1_TCNT2 (7 << 0)
#define TCR_TPSC_CH2_TCLKA (4 << 0)
#define TCR_TPSC_CH2_TCLKB (5 << 0)
#define TCR_TPSC_CH2_TCLKC (6 << 0)
#define TCR_TPSC_CH2_P1024 (7 << 0)
#define TCR_TPSC_CH34_P256 (4 << 0)
#define TCR_TPSC_CH34_P1024 (5 << 0)
#define TCR_TPSC_CH34_TCLKA (6 << 0)
#define TCR_TPSC_CH34_TCLKB (7 << 0)
#define TCR_TPSC_MASK (7 << 0)

/* TMDR: buffer operation enables and operating mode */
#define TMDR_BFE (1 << 6)
#define TMDR_BFB (1 << 5)
#define TMDR_BFA (1 << 4)
#define TMDR_MD_NORMAL (0 << 0)
#define TMDR_MD_PWM_1 (2 << 0)
#define TMDR_MD_PWM_2 (3 << 0)
#define TMDR_MD_PHASE_1 (4 << 0)
#define TMDR_MD_PHASE_2 (5 << 0)
#define TMDR_MD_PHASE_3 (6 << 0)
#define TMDR_MD_PHASE_4 (7 << 0)
#define TMDR_MD_PWM_SYNC (8 << 0)
#define TMDR_MD_PWM_COMP_CREST (13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH (14 << 0)
#define TMDR_MD_PWM_COMP_BOTH (15 << 0)
#define TMDR_MD_MASK (15 << 0)

/* TIOR: I/O control, one 4-bit nibble per TGR (high/low) */
#define TIOC_IOCH(n) ((n) << 4)
#define TIOC_IOCL(n) ((n) << 0)
#define TIOR_OC_RETAIN (0 << 0)
#define TIOR_OC_0_CLEAR (1 << 0)
#define TIOR_OC_0_SET (2 << 0)
#define TIOR_OC_0_TOGGLE (3 << 0)
#define TIOR_OC_1_CLEAR (5 << 0)
#define TIOR_OC_1_SET (6 << 0)
#define TIOR_OC_1_TOGGLE (7 << 0)
#define TIOR_IC_RISING (8 << 0)
#define TIOR_IC_FALLING (9 << 0)
#define TIOR_IC_BOTH (10 << 0)
#define TIOR_IC_TCNT (12 << 0)
#define TIOR_MASK (15 << 0)

/* TIER: interrupt enable bits */
#define TIER_TTGE (1 << 7)
#define TIER_TTGE2 (1 << 6)
#define TIER_TCIEU (1 << 5)
#define TIER_TCIEV (1 << 4)
#define TIER_TGIED (1 << 3)
#define TIER_TGIEC (1 << 2)
#define TIER_TGIEB (1 << 1)
#define TIER_TGIEA (1 << 0)

/* TSR: status flags (compare match / overflow) */
#define TSR_TCFD (1 << 7)
#define TSR_TCFU (1 << 5)
#define TSR_TCFV (1 << 4)
#define TSR_TGFD (1 << 3)
#define TSR_TGFC (1 << 2)
#define TSR_TGFB (1 << 1)
#define TSR_TGFA (1 << 0)
  125. static unsigned long mtu2_reg_offs[] = {
  126. [TCR] = 0,
  127. [TMDR] = 1,
  128. [TIOR] = 2,
  129. [TIER] = 4,
  130. [TSR] = 5,
  131. [TCNT] = 6,
  132. [TGR] = 8,
  133. };
  134. static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
  135. {
  136. unsigned long offs;
  137. if (reg_nr == TSTR)
  138. return ioread8(ch->mtu->mapbase + 0x280);
  139. offs = mtu2_reg_offs[reg_nr];
  140. if ((reg_nr == TCNT) || (reg_nr == TGR))
  141. return ioread16(ch->base + offs);
  142. else
  143. return ioread8(ch->base + offs);
  144. }
  145. static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
  146. unsigned long value)
  147. {
  148. unsigned long offs;
  149. if (reg_nr == TSTR)
  150. return iowrite8(value, ch->mtu->mapbase + 0x280);
  151. offs = mtu2_reg_offs[reg_nr];
  152. if ((reg_nr == TCNT) || (reg_nr == TGR))
  153. iowrite16(value, ch->base + offs);
  154. else
  155. iowrite8(value, ch->base + offs);
  156. }
  157. static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
  158. {
  159. unsigned long flags, value;
  160. /* start stop register shared by multiple timer channels */
  161. raw_spin_lock_irqsave(&ch->mtu->lock, flags);
  162. value = sh_mtu2_read(ch, TSTR);
  163. if (start)
  164. value |= 1 << ch->index;
  165. else
  166. value &= ~(1 << ch->index);
  167. sh_mtu2_write(ch, TSTR, value);
  168. raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
  169. }
  170. static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
  171. {
  172. unsigned long periodic;
  173. unsigned long rate;
  174. int ret;
  175. pm_runtime_get_sync(&ch->mtu->pdev->dev);
  176. dev_pm_syscore_device(&ch->mtu->pdev->dev, true);
  177. /* enable clock */
  178. ret = clk_enable(ch->mtu->clk);
  179. if (ret) {
  180. dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
  181. ch->index);
  182. return ret;
  183. }
  184. /* make sure channel is disabled */
  185. sh_mtu2_start_stop_ch(ch, 0);
  186. rate = clk_get_rate(ch->mtu->clk) / 64;
  187. periodic = (rate + HZ/2) / HZ;
  188. /*
  189. * "Periodic Counter Operation"
  190. * Clear on TGRA compare match, divide clock by 64.
  191. */
  192. sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
  193. sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
  194. TIOC_IOCL(TIOR_OC_0_CLEAR));
  195. sh_mtu2_write(ch, TGR, periodic);
  196. sh_mtu2_write(ch, TCNT, 0);
  197. sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
  198. sh_mtu2_write(ch, TIER, TIER_TGIEA);
  199. /* enable channel */
  200. sh_mtu2_start_stop_ch(ch, 1);
  201. return 0;
  202. }
/*
 * Stop a channel and release the resources taken by sh_mtu2_enable():
 * counter stopped, clock disabled, runtime-PM reference dropped.
 */
static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(ch, 0);

	/* stop clock */
	clk_disable(ch->mtu->clk);

	dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
	pm_runtime_put(&ch->mtu->pdev->dev);
}
/*
 * TGRA compare-match interrupt handler: acknowledge the hardware and hand
 * the tick to the clockevents core.
 */
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_channel *ch = dev_id;

	/*
	 * acknowledge interrupt: read TSR, then write it back with only the
	 * TGFA flag cleared. NOTE(review): the read before the write appears
	 * to be the hardware's required flag-clearing sequence — confirm
	 * against the MTU2 section of the SoC hardware manual.
	 */
	sh_mtu2_read(ch, TSR);
	sh_mtu2_write(ch, TSR, ~TSR_TGFA);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);

	return IRQ_HANDLED;
}
/* Map a clock_event_device back to its embedding sh_mtu2_channel. */
static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_channel, ced);
}
/*
 * Clockevents shutdown callback: disable the channel, but only if it was
 * actually running in periodic mode (the only mode this driver supports),
 * so the enable/disable refcounting stays balanced.
 */
static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);

	if (clockevent_state_periodic(ced))
		sh_mtu2_disable(ch);

	return 0;
}
  233. static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced)
  234. {
  235. struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
  236. if (clockevent_state_periodic(ced))
  237. sh_mtu2_disable(ch);
  238. dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n",
  239. ch->index);
  240. sh_mtu2_enable(ch);
  241. return 0;
  242. }
/* Clockevents suspend hook: power the device's PM domain off via genpd. */
static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}

/* Clockevents resume hook: power the device's PM domain back on. */
static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}
/*
 * Fill in and register the clock_event_device for one channel. Only
 * periodic mode is advertised; one-shot is not supported by this driver.
 */
static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
					const char *name)
{
	struct clock_event_device *ced = &ch->ced;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_state_shutdown = sh_mtu2_clock_event_shutdown;
	ced->set_state_periodic = sh_mtu2_clock_event_set_periodic;
	ced->suspend = sh_mtu2_clock_event_suspend;
	ced->resume = sh_mtu2_clock_event_resume;

	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);
}
/*
 * Register a channel with the clockevents framework and record on the
 * device that at least one clockevent exists (checked in probe for the
 * pm_runtime_irq_safe() decision).
 */
static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
{
	ch->mtu->has_clockevent = true;
	sh_mtu2_register_clockevent(ch, name);

	return 0;
}
  273. static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
  274. struct sh_mtu2_device *mtu)
  275. {
  276. static const unsigned int channel_offsets[] = {
  277. 0x300, 0x380, 0x000,
  278. };
  279. char name[6];
  280. int irq;
  281. int ret;
  282. ch->mtu = mtu;
  283. sprintf(name, "tgi%ua", index);
  284. irq = platform_get_irq_byname(mtu->pdev, name);
  285. if (irq < 0) {
  286. /* Skip channels with no declared interrupt. */
  287. return 0;
  288. }
  289. ret = request_irq(irq, sh_mtu2_interrupt,
  290. IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
  291. dev_name(&ch->mtu->pdev->dev), ch);
  292. if (ret) {
  293. dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
  294. index, irq);
  295. return ret;
  296. }
  297. ch->base = mtu->mapbase + channel_offsets[index];
  298. ch->index = index;
  299. return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
  300. }
/*
 * Map the device's first MEM resource into mtu->mapbase.
 * Returns 0 on success, -ENXIO if the resource is missing or the
 * ioremap fails.
 *
 * NOTE(review): ioremap_nocache() was removed from recent kernels in
 * favor of plain ioremap() — if this file is forward-ported, switch the
 * call accordingly.
 */
static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
{
	struct resource *res;

	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (mtu->mapbase == NULL)
		return -ENXIO;

	return 0;
}
/*
 * One-time device setup: take the functional clock, map the registers,
 * allocate and initialize the three channels, then publish the device via
 * drvdata. On failure the goto chain unwinds exactly the resources
 * acquired so far, in reverse order.
 *
 * Returns 0 on success or a negative error code.
 */
static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
			 struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	mtu->pdev = pdev;

	raw_spin_lock_init(&mtu->lock);

	/* Get hold of clock. */
	mtu->clk = clk_get(&mtu->pdev->dev, "fck");
	if (IS_ERR(mtu->clk)) {
		dev_err(&mtu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(mtu->clk);
	}

	ret = clk_prepare(mtu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_mtu2_map_memory(mtu);
	if (ret < 0) {
		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	mtu->num_channels = 3;

	mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
				GFP_KERNEL);
	if (mtu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * NOTE(review): if a later channel fails, irqs requested by earlier
	 * iterations are not freed here — acceptable only because the device
	 * is never torn down (remove returns -EBUSY); confirm if that changes.
	 */
	for (i = 0; i < mtu->num_channels; ++i) {
		ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, mtu);

	return 0;

err_unmap:
	kfree(mtu->channels);
	iounmap(mtu->mapbase);
err_clk_unprepare:
	clk_unprepare(mtu->clk);
err_clk_put:
	clk_put(mtu->clk);
	return ret;
}
/*
 * Probe entry point, reached twice when the device is used as an
 * earlytimer: once during early platform init (drvdata still NULL, full
 * setup runs) and once during normal driver registration (drvdata already
 * set, setup is skipped and only runtime PM is finalized).
 */
static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	/* Runtime PM is not available during the early boot pass. */
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* Second (normal) probe of a device set up as earlytimer. */
	if (mtu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

out:
	/* With a registered clockevent the device must stay irq-safe. */
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
/* Removal is refused: a registered clockevent cannot be unregistered. */
static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}
/* Platform-bus id match table (non-DT instantiation). */
static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);

/* Device-tree match table; unused when CONFIG_OF is off. */
static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
	{ .compatible = "renesas,mtu2" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);
/* Platform driver glue binding probe/remove to both match tables. */
static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
		.of_match_table = of_match_ptr(sh_mtu2_of_table),
	},
	.id_table	= sh_mtu2_id_table,
};
static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}

static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}

/* Allow the driver to probe before the driver model, as an "earlytimer". */
early_platform_init("earlytimer", &sh_mtu2_device_driver);
/* subsys_initcall: register early enough for timekeeping consumers. */
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");