/* drivers/clocksource/sh_mtu2.c */
  1. /*
  2. * SuperH Timer Support - MTU2
  3. *
  4. * Copyright (C) 2009 Magnus Damm
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. */
  15. #include <linux/clk.h>
  16. #include <linux/clockchips.h>
  17. #include <linux/delay.h>
  18. #include <linux/err.h>
  19. #include <linux/init.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/io.h>
  22. #include <linux/ioport.h>
  23. #include <linux/irq.h>
  24. #include <linux/module.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/pm_domain.h>
  27. #include <linux/pm_runtime.h>
  28. #include <linux/sh_timer.h>
  29. #include <linux/slab.h>
  30. #include <linux/spinlock.h>
struct sh_mtu2_device;

/* State of one MTU2 hardware timer channel. */
struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;	/* back-pointer to the owning device */
	unsigned int index;		/* channel number; also the TSTR bit position */
	void __iomem *base;		/* channel register base */
	int irq;			/* TGIA (compare match A) interrupt */
	struct clock_event_device ced;	/* clockevent backed by this channel */
};

/* State of the whole MTU2 block (one platform device). */
struct sh_mtu2_device {
	struct platform_device *pdev;
	void __iomem *mapbase;		/* ioremapped register window */
	struct clk *clk;		/* peripheral functional clock */
	struct sh_mtu2_channel *channels;
	unsigned int num_channels;
	bool legacy;			/* old binding: one platform device per channel */
	bool has_clockevent;		/* at least one channel registered a clockevent */
};
/* Protects the TSTR start/stop register, shared by all channels. */
static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);

/* Register indices used with sh_mtu2_read()/sh_mtu2_write(). */
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR 4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR 6 /* channel register */

/* TCR: counter clear source selection */
#define TCR_CCLR_NONE (0 << 5)
#define TCR_CCLR_TGRA (1 << 5)
#define TCR_CCLR_TGRB (2 << 5)
#define TCR_CCLR_SYNC (3 << 5)
#define TCR_CCLR_TGRC (5 << 5)
#define TCR_CCLR_TGRD (6 << 5)
#define TCR_CCLR_MASK (7 << 5)

/* TCR: clock edge selection */
#define TCR_CKEG_RISING (0 << 3)
#define TCR_CKEG_FALLING (1 << 3)
#define TCR_CKEG_BOTH (2 << 3)
#define TCR_CKEG_MASK (3 << 3)

/* TCR: timer prescaler. Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1 (0 << 0)
#define TCR_TPSC_P4 (1 << 0)
#define TCR_TPSC_P16 (2 << 0)
#define TCR_TPSC_P64 (3 << 0)
#define TCR_TPSC_CH0_TCLKA (4 << 0)
#define TCR_TPSC_CH0_TCLKB (5 << 0)
#define TCR_TPSC_CH0_TCLKC (6 << 0)
#define TCR_TPSC_CH0_TCLKD (7 << 0)
#define TCR_TPSC_CH1_TCLKA (4 << 0)
#define TCR_TPSC_CH1_TCLKB (5 << 0)
#define TCR_TPSC_CH1_P256 (6 << 0)
#define TCR_TPSC_CH1_TCNT2 (7 << 0)
#define TCR_TPSC_CH2_TCLKA (4 << 0)
#define TCR_TPSC_CH2_TCLKB (5 << 0)
#define TCR_TPSC_CH2_TCLKC (6 << 0)
#define TCR_TPSC_CH2_P1024 (7 << 0)
#define TCR_TPSC_CH34_P256 (4 << 0)
#define TCR_TPSC_CH34_P1024 (5 << 0)
#define TCR_TPSC_CH34_TCLKA (6 << 0)
#define TCR_TPSC_CH34_TCLKB (7 << 0)
#define TCR_TPSC_MASK (7 << 0)

/* TMDR: buffer operation and mode selection */
#define TMDR_BFE (1 << 6)
#define TMDR_BFB (1 << 5)
#define TMDR_BFA (1 << 4)
#define TMDR_MD_NORMAL (0 << 0)
#define TMDR_MD_PWM_1 (2 << 0)
#define TMDR_MD_PWM_2 (3 << 0)
#define TMDR_MD_PHASE_1 (4 << 0)
#define TMDR_MD_PHASE_2 (5 << 0)
#define TMDR_MD_PHASE_3 (6 << 0)
#define TMDR_MD_PHASE_4 (7 << 0)
#define TMDR_MD_PWM_SYNC (8 << 0)
#define TMDR_MD_PWM_COMP_CREST (13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH (14 << 0)
#define TMDR_MD_PWM_COMP_BOTH (15 << 0)
#define TMDR_MD_MASK (15 << 0)

/* TIOR: I/O control, high and low nibble per TGR pair */
#define TIOC_IOCH(n) ((n) << 4)
#define TIOC_IOCL(n) ((n) << 0)
#define TIOR_OC_RETAIN (0 << 0)
#define TIOR_OC_0_CLEAR (1 << 0)
#define TIOR_OC_0_SET (2 << 0)
#define TIOR_OC_0_TOGGLE (3 << 0)
#define TIOR_OC_1_CLEAR (5 << 0)
#define TIOR_OC_1_SET (6 << 0)
#define TIOR_OC_1_TOGGLE (7 << 0)
#define TIOR_IC_RISING (8 << 0)
#define TIOR_IC_FALLING (9 << 0)
#define TIOR_IC_BOTH (10 << 0)
#define TIOR_IC_TCNT (12 << 0)
#define TIOR_MASK (15 << 0)

/* TIER: interrupt enable bits */
#define TIER_TTGE (1 << 7)
#define TIER_TTGE2 (1 << 6)
#define TIER_TCIEU (1 << 5)
#define TIER_TCIEV (1 << 4)
#define TIER_TGIED (1 << 3)
#define TIER_TGIEC (1 << 2)
#define TIER_TGIEB (1 << 1)
#define TIER_TGIEA (1 << 0)

/* TSR: status flag bits */
#define TSR_TCFD (1 << 7)
#define TSR_TCFU (1 << 5)
#define TSR_TCFV (1 << 4)
#define TSR_TGFD (1 << 3)
#define TSR_TGFC (1 << 2)
#define TSR_TGFB (1 << 1)
#define TSR_TGFA (1 << 0)
/* Byte offsets of the per-channel registers, indexed by TCR..TGR above. */
static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};
  143. static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
  144. {
  145. unsigned long offs;
  146. if (reg_nr == TSTR) {
  147. if (ch->mtu->legacy)
  148. return ioread8(ch->mtu->mapbase);
  149. else
  150. return ioread8(ch->mtu->mapbase + 0x280);
  151. }
  152. offs = mtu2_reg_offs[reg_nr];
  153. if ((reg_nr == TCNT) || (reg_nr == TGR))
  154. return ioread16(ch->base + offs);
  155. else
  156. return ioread8(ch->base + offs);
  157. }
  158. static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
  159. unsigned long value)
  160. {
  161. unsigned long offs;
  162. if (reg_nr == TSTR) {
  163. if (ch->mtu->legacy)
  164. return iowrite8(value, ch->mtu->mapbase);
  165. else
  166. return iowrite8(value, ch->mtu->mapbase + 0x280);
  167. }
  168. offs = mtu2_reg_offs[reg_nr];
  169. if ((reg_nr == TCNT) || (reg_nr == TGR))
  170. iowrite16(value, ch->base + offs);
  171. else
  172. iowrite8(value, ch->base + offs);
  173. }
  174. static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
  175. {
  176. unsigned long flags, value;
  177. /* start stop register shared by multiple timer channels */
  178. raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
  179. value = sh_mtu2_read(ch, TSTR);
  180. if (start)
  181. value |= 1 << ch->index;
  182. else
  183. value &= ~(1 << ch->index);
  184. sh_mtu2_write(ch, TSTR, value);
  185. raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
  186. }
  187. static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
  188. {
  189. unsigned long periodic;
  190. unsigned long rate;
  191. int ret;
  192. pm_runtime_get_sync(&ch->mtu->pdev->dev);
  193. dev_pm_syscore_device(&ch->mtu->pdev->dev, true);
  194. /* enable clock */
  195. ret = clk_enable(ch->mtu->clk);
  196. if (ret) {
  197. dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
  198. ch->index);
  199. return ret;
  200. }
  201. /* make sure channel is disabled */
  202. sh_mtu2_start_stop_ch(ch, 0);
  203. rate = clk_get_rate(ch->mtu->clk) / 64;
  204. periodic = (rate + HZ/2) / HZ;
  205. /*
  206. * "Periodic Counter Operation"
  207. * Clear on TGRA compare match, divide clock by 64.
  208. */
  209. sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
  210. sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
  211. TIOC_IOCL(TIOR_OC_0_CLEAR));
  212. sh_mtu2_write(ch, TGR, periodic);
  213. sh_mtu2_write(ch, TCNT, 0);
  214. sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
  215. sh_mtu2_write(ch, TIER, TIER_TGIEA);
  216. /* enable channel */
  217. sh_mtu2_start_stop_ch(ch, 1);
  218. return 0;
  219. }
/* Stop a channel and release the clock and runtime PM references. */
static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(ch, 0);

	/* stop clock */
	clk_disable(ch->mtu->clk);

	dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
	pm_runtime_put(&ch->mtu->pdev->dev);
}
/* TGIA interrupt handler: one periodic tick (TGRA compare match). */
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_channel *ch = dev_id;

	/* acknowledge interrupt: read TSR, then clear only the TGFA flag */
	sh_mtu2_read(ch, TSR);
	sh_mtu2_write(ch, TSR, ~TSR_TGFA);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);

	return IRQ_HANDLED;
}
/* Map a clock_event_device back to its containing MTU2 channel. */
static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_channel, ced);
}
  243. static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
  244. struct clock_event_device *ced)
  245. {
  246. struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
  247. int disabled = 0;
  248. /* deal with old setting first */
  249. switch (ced->mode) {
  250. case CLOCK_EVT_MODE_PERIODIC:
  251. sh_mtu2_disable(ch);
  252. disabled = 1;
  253. break;
  254. default:
  255. break;
  256. }
  257. switch (mode) {
  258. case CLOCK_EVT_MODE_PERIODIC:
  259. dev_info(&ch->mtu->pdev->dev,
  260. "ch%u: used for periodic clock events\n", ch->index);
  261. sh_mtu2_enable(ch);
  262. break;
  263. case CLOCK_EVT_MODE_UNUSED:
  264. if (!disabled)
  265. sh_mtu2_disable(ch);
  266. break;
  267. case CLOCK_EVT_MODE_SHUTDOWN:
  268. default:
  269. break;
  270. }
  271. }
/* Forward clockevent suspend/resume to the generic PM domain helpers. */
static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}

static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}
  280. static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
  281. const char *name)
  282. {
  283. struct clock_event_device *ced = &ch->ced;
  284. int ret;
  285. ced->name = name;
  286. ced->features = CLOCK_EVT_FEAT_PERIODIC;
  287. ced->rating = 200;
  288. ced->cpumask = cpu_possible_mask;
  289. ced->set_mode = sh_mtu2_clock_event_mode;
  290. ced->suspend = sh_mtu2_clock_event_suspend;
  291. ced->resume = sh_mtu2_clock_event_resume;
  292. dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
  293. ch->index);
  294. clockevents_register_device(ced);
  295. ret = request_irq(ch->irq, sh_mtu2_interrupt,
  296. IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
  297. dev_name(&ch->mtu->pdev->dev), ch);
  298. if (ret) {
  299. dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
  300. ch->index, ch->irq);
  301. return;
  302. }
  303. }
  304. static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name,
  305. bool clockevent)
  306. {
  307. if (clockevent) {
  308. ch->mtu->has_clockevent = true;
  309. sh_mtu2_register_clockevent(ch, name);
  310. }
  311. return 0;
  312. }
  313. static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
  314. struct sh_mtu2_device *mtu)
  315. {
  316. static const unsigned int channel_offsets[] = {
  317. 0x300, 0x380, 0x000,
  318. };
  319. bool clockevent;
  320. ch->mtu = mtu;
  321. if (mtu->legacy) {
  322. struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
  323. clockevent = cfg->clockevent_rating != 0;
  324. ch->irq = platform_get_irq(mtu->pdev, 0);
  325. ch->base = mtu->mapbase - cfg->channel_offset;
  326. ch->index = cfg->timer_bit;
  327. } else {
  328. char name[6];
  329. clockevent = true;
  330. sprintf(name, "tgi%ua", index);
  331. ch->irq = platform_get_irq_byname(mtu->pdev, name);
  332. ch->base = mtu->mapbase + channel_offsets[index];
  333. ch->index = index;
  334. }
  335. if (ch->irq < 0) {
  336. /* Skip channels with no declared interrupt. */
  337. if (!mtu->legacy)
  338. return 0;
  339. dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n",
  340. ch->index);
  341. return ch->irq;
  342. }
  343. return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent);
  344. }
  345. static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
  346. {
  347. struct resource *res;
  348. res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
  349. if (!res) {
  350. dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
  351. return -ENXIO;
  352. }
  353. mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
  354. if (mtu->mapbase == NULL)
  355. return -ENXIO;
  356. /*
  357. * In legacy platform device configuration (with one device per channel)
  358. * the resource points to the channel base address.
  359. */
  360. if (mtu->legacy) {
  361. struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
  362. mtu->mapbase += cfg->channel_offset;
  363. }
  364. return 0;
  365. }
/* Undo sh_mtu2_map_memory(), reversing the legacy channel_offset rebase
 * so that iounmap() gets the pointer ioremap_nocache() returned. */
static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
{
	if (mtu->legacy) {
		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;

		mtu->mapbase -= cfg->channel_offset;
	}

	iounmap(mtu->mapbase);
}
  374. static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
  375. struct platform_device *pdev)
  376. {
  377. struct sh_timer_config *cfg = pdev->dev.platform_data;
  378. const struct platform_device_id *id = pdev->id_entry;
  379. unsigned int i;
  380. int ret;
  381. mtu->pdev = pdev;
  382. mtu->legacy = id->driver_data;
  383. if (mtu->legacy && !cfg) {
  384. dev_err(&mtu->pdev->dev, "missing platform data\n");
  385. return -ENXIO;
  386. }
  387. /* Get hold of clock. */
  388. mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck");
  389. if (IS_ERR(mtu->clk)) {
  390. dev_err(&mtu->pdev->dev, "cannot get clock\n");
  391. return PTR_ERR(mtu->clk);
  392. }
  393. ret = clk_prepare(mtu->clk);
  394. if (ret < 0)
  395. goto err_clk_put;
  396. /* Map the memory resource. */
  397. ret = sh_mtu2_map_memory(mtu);
  398. if (ret < 0) {
  399. dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
  400. goto err_clk_unprepare;
  401. }
  402. /* Allocate and setup the channels. */
  403. if (mtu->legacy)
  404. mtu->num_channels = 1;
  405. else
  406. mtu->num_channels = 3;
  407. mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
  408. GFP_KERNEL);
  409. if (mtu->channels == NULL) {
  410. ret = -ENOMEM;
  411. goto err_unmap;
  412. }
  413. if (mtu->legacy) {
  414. ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu);
  415. if (ret < 0)
  416. goto err_unmap;
  417. } else {
  418. for (i = 0; i < mtu->num_channels; ++i) {
  419. ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
  420. if (ret < 0)
  421. goto err_unmap;
  422. }
  423. }
  424. platform_set_drvdata(pdev, mtu);
  425. return 0;
  426. err_unmap:
  427. kfree(mtu->channels);
  428. sh_mtu2_unmap_memory(mtu);
  429. err_clk_unprepare:
  430. clk_unprepare(mtu->clk);
  431. err_clk_put:
  432. clk_put(mtu->clk);
  433. return ret;
  434. }
/*
 * Probe entry point. May run twice for the same device: first as an early
 * platform device ("earlytimer") and again during the regular platform
 * probe, in which case the early instance is kept (non-NULL drvdata).
 */
static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	/* Runtime PM is only set up on the regular (non-early) probe. */
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* Non-NULL drvdata: the early probe already performed the setup. */
	if (mtu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

out:
	/*
	 * Clock event devices are programmed from interrupt context, so
	 * runtime PM callbacks must be IRQ safe; otherwise just let the
	 * device idle.
	 */
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
/* Removal is refused: a registered clockevent cannot be unregistered. */
static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}
/* driver_data == 1 selects the legacy (one device per channel) binding. */
static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh_mtu2", 1 },
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
/* Platform driver glue; matched by name or via the id table above. */
static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
	},
	.id_table	= sh_mtu2_id_table,
};
/* Module init: register the platform driver. */
static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}
/* Register as an early platform driver so the timer is usable at boot. */
early_platform_init("earlytimer", &sh_mtu2_device_driver);
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");