  1. /*
  2. * SuperH Timer Support - CMT
  3. *
  4. * Copyright (C) 2008 Magnus Damm
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. */
  15. #include <linux/clk.h>
  16. #include <linux/clockchips.h>
  17. #include <linux/clocksource.h>
  18. #include <linux/delay.h>
  19. #include <linux/err.h>
  20. #include <linux/init.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/io.h>
  23. #include <linux/ioport.h>
  24. #include <linux/irq.h>
  25. #include <linux/module.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/pm_domain.h>
  28. #include <linux/pm_runtime.h>
  29. #include <linux/sh_timer.h>
  30. #include <linux/slab.h>
  31. #include <linux/spinlock.h>
  32. struct sh_cmt_device;
  33. /*
  34. * The CMT comes in 5 different identified flavours, depending not only on the
  35. * SoC but also on the particular instance. The following table lists the main
  36. * characteristics of those flavours.
  37. *
  38. * 16B 32B 32B-F 48B 48B-2
  39. * -----------------------------------------------------------------------------
  40. * Channels 2 1/4 1 6 2/8
  41. * Control Width 16 16 16 16 32
  42. * Counter Width 16 32 32 32/48 32/48
  43. * Shared Start/Stop Y Y Y Y N
  44. *
  45. * The 48-bit gen2 version has a per-channel start/stop register located in the
  46. * channel registers block. All other versions have a shared start/stop register
  47. * located in the global space.
  48. *
  49. * Channels are indexed from 0 to N-1 in the documentation. The channel index
  50. * infers the start/stop bit position in the control register and the channel
  51. * registers block address. Some CMT instances have a subset of channels
  52. * available, in which case the index in the documentation doesn't match the
  53. * "real" index as implemented in hardware. This is for instance the case with
  54. * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
  55. * in the documentation but using start/stop bit 5 and having its registers
  56. * block at 0x60.
  57. *
  58. * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
  59. * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
  60. */
/*
 * Identified CMT hardware flavours; see the table in the comment above
 * for the main characteristics of each model.
 */
enum sh_cmt_model {
	SH_CMT_16BIT,
	SH_CMT_32BIT,
	SH_CMT_32BIT_FAST,
	SH_CMT_48BIT,
	SH_CMT_48BIT_GEN2,
};
/*
 * Static per-model description of a CMT flavour: register width, the
 * CMCSR overflow/acknowledge bits and accessors matching the width of
 * the control and counter registers.
 */
struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned long width; /* 16 or 32 bit version of hardware block */
	unsigned long overflow_bit;	/* CMF flag bit in CMCSR */
	unsigned long clear_bits;	/* mask ANDed into CMCSR to ack flags */

	/* callbacks for CMSTR and CMCSR access */
	unsigned long (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      unsigned long value);

	/* callbacks for CMCNT and CMCOR access */
	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs,
			    unsigned long value);
};
/* Per-channel state */
struct sh_cmt_channel {
	struct sh_cmt_device *cmt;	/* parent device */

	unsigned int index;	/* Index in the documentation */
	unsigned int hwidx;	/* Real hardware index */

	void __iomem *iostart;	/* per-channel start/stop block (48-bit gen2 only) */
	void __iomem *ioctrl;	/* channel control register block */

	unsigned int timer_bit;		/* start/stop bit position in CMSTR */
	unsigned long flags;		/* FLAG_* private flags, see below */
	unsigned long match_value;	/* match value currently programmed in CMCOR */
	unsigned long next_match_value;	/* match value to be programmed next */
	unsigned long max_match_value;	/* limit imposed by the counter width */
	unsigned long rate;		/* input clock rate after prescaling */
	raw_spinlock_t lock;		/* protects the mutable fields above */
	struct clock_event_device ced;
	struct clocksource cs;
	unsigned long total_cycles;	/* cycles accounted by the interrupt handler */
	bool cs_enabled;		/* clocksource currently enabled */
};
/* One CMT hardware instance together with its channels */
struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;	/* model description */
	bool legacy;			/* configured through legacy platform data */

	void __iomem *mapbase_ch;	/* legacy mode: mapped channel registers */
	void __iomem *mapbase;		/* mapped registers (CMSTR block in legacy mode) */
	struct clk *clk;

	struct sh_cmt_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};
/* CMCSR bit definitions, 16-bit timer variant */
#define SH_CMT16_CMCSR_CMF (1 << 7)		/* compare match flag */
#define SH_CMT16_CMCSR_CMIE (1 << 6)		/* compare match interrupt enable */
#define SH_CMT16_CMCSR_CKS8 (0 << 0)		/* clock select values */
#define SH_CMT16_CMCSR_CKS32 (1 << 0)
#define SH_CMT16_CMCSR_CKS128 (2 << 0)
#define SH_CMT16_CMCSR_CKS512 (3 << 0)		/* used by sh_cmt_enable(): rate / 512 */
#define SH_CMT16_CMCSR_CKS_MASK (3 << 0)

/* CMCSR bit definitions, 32-bit timer variants */
#define SH_CMT32_CMCSR_CMF (1 << 15)		/* compare match flag */
#define SH_CMT32_CMCSR_OVF (1 << 14)		/* overflow flag */
#define SH_CMT32_CMCSR_WRFLG (1 << 13)
#define SH_CMT32_CMCSR_STTF (1 << 12)
#define SH_CMT32_CMCSR_STPF (1 << 11)
#define SH_CMT32_CMCSR_SSIE (1 << 10)
#define SH_CMT32_CMCSR_CMS (1 << 9)
#define SH_CMT32_CMCSR_CMM (1 << 8)
#define SH_CMT32_CMCSR_CMTOUT_IE (1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE (0 << 4)	/* compare match request: none */
#define SH_CMT32_CMCSR_CMR_DMA (1 << 4)		/* compare match request: DMA */
#define SH_CMT32_CMCSR_CMR_IRQ (2 << 4)		/* compare match request: interrupt */
#define SH_CMT32_CMCSR_CMR_MASK (3 << 4)
#define SH_CMT32_CMCSR_DBGIVD (1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8 (4 << 0)	/* used by sh_cmt_enable(): rate / 8 */
#define SH_CMT32_CMCSR_CKS_RCLK32 (5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128 (6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
  138. static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
  139. {
  140. return ioread16(base + (offs << 1));
  141. }
  142. static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
  143. {
  144. return ioread32(base + (offs << 2));
  145. }
  146. static void sh_cmt_write16(void __iomem *base, unsigned long offs,
  147. unsigned long value)
  148. {
  149. iowrite16(value, base + (offs << 1));
  150. }
  151. static void sh_cmt_write32(void __iomem *base, unsigned long offs,
  152. unsigned long value)
  153. {
  154. iowrite32(value, base + (offs << 2));
  155. }
/*
 * Per-model register descriptions, indexed by enum sh_cmt_model. Only
 * the 16-bit variant uses 16-bit counter accessors; the 48-bit variants
 * additionally use 32-bit control accessors.
 */
static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_32BIT_FAST] = {
		.model = SH_CMT_32BIT_FAST,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT_GEN2] = {
		.model = SH_CMT_48BIT_GEN2,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};
/* Per-channel register indices, scaled by the access width in the accessors */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
  211. static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
  212. {
  213. if (ch->iostart)
  214. return ch->cmt->info->read_control(ch->iostart, 0);
  215. else
  216. return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
  217. }
  218. static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
  219. unsigned long value)
  220. {
  221. if (ch->iostart)
  222. ch->cmt->info->write_control(ch->iostart, 0, value);
  223. else
  224. ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
  225. }
  226. static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
  227. {
  228. return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
  229. }
  230. static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
  231. unsigned long value)
  232. {
  233. ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
  234. }
  235. static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
  236. {
  237. return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
  238. }
  239. static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
  240. unsigned long value)
  241. {
  242. ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
  243. }
  244. static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
  245. unsigned long value)
  246. {
  247. ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
  248. }
/*
 * Read a stable counter value together with the overflow flag. CMCSR
 * and CMCNT cannot be read atomically, so sample CMCNT three times and
 * retry until the samples are monotonic and the overflow flag did not
 * change in between. Returns the middle sample; *has_wrapped is
 * non-zero when the overflow flag was set.
 */
static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
					int *has_wrapped)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}
/* Serializes read-modify-write of the CMSTR register shared between channels */
static DEFINE_RAW_SPINLOCK(sh_cmt_lock);

/*
 * Set (@start != 0) or clear this channel's bit in the CMSTR start/stop
 * register. The register may be shared by several channels, hence the
 * global lock around the read-modify-write cycle.
 */
static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
	value = sh_cmt_read_cmstr(ch);

	if (start)
		value |= 1 << ch->timer_bit;
	else
		value &= ~(1 << ch->timer_bit);

	sh_cmt_write_cmstr(ch, value);
	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
/*
 * Power up and start the channel: enable its clock, program periodic
 * compare-match operation with the maximum timeout and clear the
 * counter. On success *rate holds the effective (prescaled) input
 * clock rate.
 *
 * Returns 0 on success, a clk_enable() error code, or -ETIMEDOUT when
 * CMCNT refuses to clear.
 */
static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
{
	int k, ret;

	pm_runtime_get_sync(&ch->cmt->pdev->dev);
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		*rate = clk_get_rate(ch->cmt->clk) / 512;
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		*rate = clk_get_rate(ch->cmt->clk) / 8;
		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMTOUT_IE |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	sh_cmt_write_cmcnt(ch, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here. This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(ch))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;

err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

err0:
	return ret;
}
/*
 * Stop the channel and power it down: reverse of sh_cmt_enable().
 * The channel is stopped before its interrupts are masked and the
 * clock is gated.
 */
static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
	pm_runtime_put(&ch->cmt->pdev->dev);
}
/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)	/* channel used as clock event device */
#define FLAG_CLOCKSOURCE (1 << 1)	/* channel used as clocksource */
#define FLAG_REPROGRAM (1 << 2)		/* match value needs reprogramming */
#define FLAG_SKIPEVENT (1 << 3)		/* skip the next match event */
#define FLAG_IRQCONTEXT (1 << 4)	/* running inside sh_cmt_interrupt() */
/*
 * Program CMCOR with next_match_value ticks from zero (@absolute) or
 * from the current counter, then verify against the live counter that
 * the match was not already missed; if it was, retry with an
 * exponentially growing safety delay. Caller must hold ch->lock.
 */
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = ch->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 * -> let the interrupt handler reprogram the timer.
		 * -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		/* delay of 0 after shifting means it overflowed to zero */
		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}
/*
 * Record @delta as the next match value and program the hardware.
 * Caller must hold ch->lock. Deltas above max_match_value are warned
 * about and then clamped by sh_cmt_clock_event_program_verify().
 */
static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}
/* Locked wrapper around __sh_cmt_set_next() */
static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__sh_cmt_set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}
/*
 * Per-channel interrupt handler: acknowledge the CMCSR flags, account
 * elapsed cycles for the clocksource, dispatch the clock event and
 * reprogram the compare register when required.
 */
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	/* lets sh_cmt_clock_event_next() defer programming to this handler */
	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}
/*
 * Acquire the channel for @flag (FLAG_CLOCKEVENT or FLAG_CLOCKSOURCE),
 * powering up the hardware on first use. A clocksource-only channel is
 * armed with the maximum timeout so the counter keeps running.
 */
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(ch, &ch->rate);

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}
/*
 * Release the channel from @flag use: power down the hardware when the
 * last user goes away, and re-arm the maximum timeout when only the
 * clocksource remains active.
 */
static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(ch);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);
}
/* Retrieve the channel embedding clocksource @cs */
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}
/*
 * Clocksource read callback: cycles accumulated by the interrupt
 * handler plus the current counter value, corrected for a wrap the
 * interrupt handler has not accounted yet.
 */
static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	unsigned long flags, raw;
	unsigned long value;
	int has_wrapped;

	raw_spin_lock_irqsave(&ch->lock, flags);
	value = ch->total_cycles;
	raw = sh_cmt_get_counter(ch, &has_wrapped);

	if (unlikely(has_wrapped))
		raw += ch->match_value + 1;
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return value + raw;
}
/*
 * Clocksource enable callback: reset the cycle counter, start the
 * channel, and publish the real rate (registration used a dummy
 * 1 Hz frequency).
 */
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(ch->cs_enabled);

	ch->total_cycles = 0;

	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}
	return ret;
}
/* Clocksource disable callback: stop the channel and mark it unused. */
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}
/* Stop the channel and let its PM domain power it off over suspend. */
static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}
/* Power the PM domain back on and restart the channel after resume. */
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
/*
 * Register the channel as a clocksource. The rate is unknown until the
 * channel is started, so register with a dummy 1 Hz frequency that
 * sh_cmt_clocksource_enable() later corrects. Always returns 0.
 */
static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 125;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	/*
	 * NOTE(review): the mask covers the width of unsigned long to
	 * match total_cycles, so on 32-bit machines this clocksource is
	 * only 32 bits wide - confirm that is intended.
	 */
	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}
/* Retrieve the channel embedding clock event device @ced */
static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}
/*
 * Start the channel for clock event use and derive the mult/shift and
 * delta limits from the now-known rate. Periodic mode arms a HZ tick,
 * oneshot arms the maximum timeout until set_next_event() is called.
 */
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_cmt_start(ch, FLAG_CLOCKEVENT);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

	if (periodic)
		sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(ch, ch->max_match_value);
}
/*
 * set_mode callback: stop the channel if it was running in periodic or
 * oneshot mode, then start or shut it down according to @mode.
 */
static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->cmt->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_cmt_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->cmt->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_cmt_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}
}
/*
 * set_next_event callback. Inside our own interrupt handler only record
 * the new match value - the handler's epilogue programs it; otherwise
 * program it immediately under the channel lock.
 */
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}
/* Power off the PM domain and unprepare the clock over suspend. */
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}
/* Re-prepare the clock and power the PM domain back on after resume. */
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
}
/*
 * Register the channel as a clock event device and request its
 * interrupt: legacy devices expose a single IRQ resource, otherwise
 * one IRQ per channel index.
 *
 * Returns 0 on success or a negative error code.
 */
static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				      const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int irq;
	int ret;

	irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index);
	if (irq < 0) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return irq;
	}

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 125;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_mode = sh_cmt_clock_event_mode;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	return 0;
}
  689. static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
  690. bool clockevent, bool clocksource)
  691. {
  692. int ret;
  693. if (clockevent) {
  694. ch->cmt->has_clockevent = true;
  695. ret = sh_cmt_register_clockevent(ch, name);
  696. if (ret < 0)
  697. return ret;
  698. }
  699. if (clocksource) {
  700. ch->cmt->has_clocksource = true;
  701. sh_cmt_register_clocksource(ch, name);
  702. }
  703. return 0;
  704. }
/*
 * Initialize one channel: compute its register addresses from the CMT
 * model, derive the maximum match value and start/stop bit, and
 * register it as clock event device and/or clocksource.
 *
 * Returns 0 on success (including for unused channels) or a negative
 * error code on registration failure.
 */
static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
				unsigned int hwidx, bool clockevent,
				bool clocksource, struct sh_cmt_device *cmt)
{
	int ret;

	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->cmt = cmt;
	ch->index = index;
	ch->hwidx = hwidx;

	/*
	 * Compute the address of the channel control register block. For the
	 * timers with a per-channel start/stop register, compute its address
	 * as well.
	 *
	 * For legacy configuration the address has been mapped explicitly.
	 */
	if (cmt->legacy) {
		ch->ioctrl = cmt->mapbase_ch;
	} else {
		switch (cmt->info->model) {
		case SH_CMT_16BIT:
			ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
			break;
		case SH_CMT_32BIT:
		case SH_CMT_48BIT:
			ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
			break;
		case SH_CMT_32BIT_FAST:
			/*
			 * The 32-bit "fast" timer has a single channel at hwidx
			 * 5 but is located at offset 0x40 instead of 0x60 for
			 * some reason.
			 */
			ch->ioctrl = cmt->mapbase + 0x40;
			break;
		case SH_CMT_48BIT_GEN2:
			ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
			ch->ioctrl = ch->iostart + 0x10;
			break;
		}
	}

	/* Guard against undefined behaviour: 1 << 32 on 32-bit counters */
	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->info->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	if (cmt->legacy) {
		ch->timer_bit = ch->hwidx;
	} else {
		/* gen2 has a per-channel start/stop register, always bit 0 */
		ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2
			      ? 0 : ch->hwidx;
	}

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      clockevent, clocksource);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	return 0;
}
/*
 * Map the CMT registers from the device's first MEM resource.
 * Returns 0 on success or -ENXIO.
 */
static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
{
	struct resource *mem;

	mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
	if (cmt->mapbase == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
		return -ENXIO;
	}

	return 0;
}
  785. static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt)
  786. {
  787. struct sh_timer_config *cfg = cmt->pdev->dev.platform_data;
  788. struct resource *res, *res2;
  789. /* map memory, let mapbase_ch point to our channel */
  790. res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
  791. if (!res) {
  792. dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
  793. return -ENXIO;
  794. }
  795. cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res));
  796. if (cmt->mapbase_ch == NULL) {
  797. dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
  798. return -ENXIO;
  799. }
  800. /* optional resource for the shared timer start/stop register */
  801. res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1);
  802. /* map second resource for CMSTR */
  803. cmt->mapbase = ioremap_nocache(res2 ? res2->start :
  804. res->start - cfg->channel_offset,
  805. res2 ? resource_size(res2) : 2);
  806. if (cmt->mapbase == NULL) {
  807. dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n");
  808. iounmap(cmt->mapbase_ch);
  809. return -ENXIO;
  810. }
  811. /* identify the model based on the resources */
  812. if (resource_size(res) == 6)
  813. cmt->info = &sh_cmt_info[SH_CMT_16BIT];
  814. else if (res2 && (resource_size(res2) == 4))
  815. cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2];
  816. else
  817. cmt->info = &sh_cmt_info[SH_CMT_32BIT];
  818. return 0;
  819. }
  820. static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt)
  821. {
  822. iounmap(cmt->mapbase);
  823. if (cmt->mapbase_ch)
  824. iounmap(cmt->mapbase_ch);
  825. }
  826. static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
  827. {
  828. struct sh_timer_config *cfg = pdev->dev.platform_data;
  829. const struct platform_device_id *id = pdev->id_entry;
  830. unsigned int hw_channels;
  831. int ret;
  832. memset(cmt, 0, sizeof(*cmt));
  833. cmt->pdev = pdev;
  834. if (!cfg) {
  835. dev_err(&cmt->pdev->dev, "missing platform data\n");
  836. return -ENXIO;
  837. }
  838. cmt->info = (const struct sh_cmt_info *)id->driver_data;
  839. cmt->legacy = cmt->info ? false : true;
  840. /* Get hold of clock. */
  841. cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck");
  842. if (IS_ERR(cmt->clk)) {
  843. dev_err(&cmt->pdev->dev, "cannot get clock\n");
  844. return PTR_ERR(cmt->clk);
  845. }
  846. ret = clk_prepare(cmt->clk);
  847. if (ret < 0)
  848. goto err_clk_put;
  849. /*
  850. * Map the memory resource(s). We need to support both the legacy
  851. * platform device configuration (with one device per channel) and the
  852. * new version (with multiple channels per device).
  853. */
  854. if (cmt->legacy)
  855. ret = sh_cmt_map_memory_legacy(cmt);
  856. else
  857. ret = sh_cmt_map_memory(cmt);
  858. if (ret < 0)
  859. goto err_clk_unprepare;
  860. /* Allocate and setup the channels. */
  861. if (cmt->legacy) {
  862. cmt->num_channels = 1;
  863. hw_channels = 0;
  864. } else {
  865. cmt->num_channels = hweight8(cfg->channels_mask);
  866. hw_channels = cfg->channels_mask;
  867. }
  868. cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
  869. GFP_KERNEL);
  870. if (cmt->channels == NULL) {
  871. ret = -ENOMEM;
  872. goto err_unmap;
  873. }
  874. if (cmt->legacy) {
  875. ret = sh_cmt_setup_channel(&cmt->channels[0],
  876. cfg->timer_bit, cfg->timer_bit,
  877. cfg->clockevent_rating != 0,
  878. cfg->clocksource_rating != 0, cmt);
  879. if (ret < 0)
  880. goto err_unmap;
  881. } else {
  882. unsigned int mask = hw_channels;
  883. unsigned int i;
  884. /*
  885. * Use the first channel as a clock event device and the second
  886. * channel as a clock source. If only one channel is available
  887. * use it for both.
  888. */
  889. for (i = 0; i < cmt->num_channels; ++i) {
  890. unsigned int hwidx = ffs(mask) - 1;
  891. bool clocksource = i == 1 || cmt->num_channels == 1;
  892. bool clockevent = i == 0;
  893. ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
  894. clockevent, clocksource,
  895. cmt);
  896. if (ret < 0)
  897. goto err_unmap;
  898. mask &= ~(1 << hwidx);
  899. }
  900. }
  901. platform_set_drvdata(pdev, cmt);
  902. return 0;
  903. err_unmap:
  904. kfree(cmt->channels);
  905. sh_cmt_unmap_memory(cmt);
  906. err_clk_unprepare:
  907. clk_unprepare(cmt->clk);
  908. err_clk_put:
  909. clk_put(cmt->clk);
  910. return ret;
  911. }
/*
 * sh_cmt_probe() - platform driver probe entry point
 *
 * Can run twice for the same device: once via the early platform
 * ("earlytimer") path and again during normal driver registration.
 * An earlier probe leaves its sh_cmt_device in drvdata; in that case
 * the device is kept and only the runtime PM state is updated.
 */
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	int ret;

	/* Runtime PM is not available for early platform devices. */
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* Non-NULL drvdata means an earlytimer probe already set us up. */
	if (cmt) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL)
		return -ENOMEM;

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	/* Early pass: defer runtime PM decisions to the second probe. */
	if (is_early_platform_device(pdev))
		return 0;

out:
	/*
	 * Clock event / clock source callbacks run in atomic context,
	 * so runtime PM operations must be IRQ-safe for those devices.
	 */
	if (cmt->has_clockevent || cmt->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
  942. static int sh_cmt_remove(struct platform_device *pdev)
  943. {
  944. return -EBUSY; /* cannot unregister clockevent and clocksource */
  945. }
/*
 * Platform device ID table. driver_data points at the matching
 * sh_cmt_info model descriptor; the legacy "sh_cmt" entry carries no
 * model information (driver_data == 0) and selects the legacy path.
 */
static const struct platform_device_id sh_cmt_id_table[] = {
	{ "sh_cmt", 0 },
	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
	{ "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
	{ "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
	{ "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
/* Platform driver glue; matched by name via sh_cmt_id_table. */
static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
	},
	.id_table	= sh_cmt_id_table,
};
/* Module init: register the platform driver (normal, non-early path). */
static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}
/* Allow probing as an early platform device ("earlytimer" class). */
early_platform_init("earlytimer", &sh_cmt_device_driver);
/* Register early in boot so timers are available to other subsystems. */
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");