/* arc_timer.c */
  1. /*
  2. * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
  3. * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. */
  9. /* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1, Each can be
  10. * programmed to go from @count to @limit and optionally interrupt.
  11. * We've designated TIMER0 for clockevents and TIMER1 for clocksource
  12. *
  13. * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP)
  14. * which are suitable for UP and SMP based clocksources respectively
  15. */
  16. #include <linux/interrupt.h>
  17. #include <linux/clk.h>
  18. #include <linux/clk-provider.h>
  19. #include <linux/clocksource.h>
  20. #include <linux/clockchips.h>
  21. #include <linux/cpu.h>
  22. #include <linux/of.h>
  23. #include <linux/of_irq.h>
  24. #include <soc/arc/timers.h>
  25. #include <soc/arc/mcip.h>
/* Timer input clock rate (Hz), cached by arc_get_timer_clk() and used by
 * all clocksource/clockevent registrations below.
 */
static unsigned long arc_timer_freq;
  27. static int noinline arc_get_timer_clk(struct device_node *node)
  28. {
  29. struct clk *clk;
  30. int ret;
  31. clk = of_clk_get(node, 0);
  32. if (IS_ERR(clk)) {
  33. pr_err("timer missing clk\n");
  34. return PTR_ERR(clk);
  35. }
  36. ret = clk_prepare_enable(clk);
  37. if (ret) {
  38. pr_err("Couldn't enable parent clk\n");
  39. return ret;
  40. }
  41. arc_timer_freq = clk_get_rate(clk);
  42. return 0;
  43. }
  44. /********** Clock Source Device *********/
  45. #ifdef CONFIG_ARC_TIMERS_64BIT
/*
 * Clocksource ->read hook: return the 64-bit Global Free Running Counter
 * (GFRC) value, assembled from two 32-bit MCIP readback transactions.
 */
static u64 arc_read_gfrc(struct clocksource *cs)
{
	unsigned long flags;
	u32 l, h;

	/*
	 * From a programming model pov, there seems to be just one instance of
	 * MCIP_CMD/MCIP_READBACK however micro-architecturally there's
	 * an instance PER ARC CORE (not per cluster), and there are dedicated
	 * hardware decode logic (per core) inside ARConnect to handle
	 * simultaneous read/write accesses from cores via those two registers.
	 * So several concurrent commands to ARConnect are OK if they are
	 * trying to access two different sub-components (like GFRC,
	 * inter-core interrupt, etc...). HW also supports simultaneously
	 * accessing GFRC by multiple cores.
	 * That's why it is safe to disable hard interrupts on the local CPU
	 * before access to GFRC instead of taking global MCIP spinlock
	 * defined in arch/arc/kernel/mcip.c
	 */
	local_irq_save(flags);

	/* CMD then READBACK must stay paired and ordered: lo half, hi half */
	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	l = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return (((u64)h) << 32) | l;
}
/* GFRC clocksource: 64-bit, cluster-global, hence the highest rating here */
static struct clocksource arc_counter_gfrc = {
	.name = "ARConnect GFRC",
	.rating = 400,
	.read = arc_read_gfrc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
  79. static int __init arc_cs_setup_gfrc(struct device_node *node)
  80. {
  81. struct mcip_bcr mp;
  82. int ret;
  83. READ_BCR(ARC_REG_MCIP_BCR, mp);
  84. if (!mp.gfrc) {
  85. pr_warn("Global-64-bit-Ctr clocksource not detected\n");
  86. return -ENXIO;
  87. }
  88. ret = arc_get_timer_clk(node);
  89. if (ret)
  90. return ret;
  91. return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
  92. }
  93. TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
/* ARCv2 in-core RTC aux registers */
#define AUX_RTC_CTRL 0x103
#define AUX_RTC_LOW 0x104
#define AUX_RTC_HIGH 0x105

/*
 * Clocksource ->read hook: return the 64-bit in-core RTC value, retrying
 * until hardware confirms the lo/hi halves form a coherent snapshot.
 */
static u64 arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	u32 l, h;

	/*
	 * hardware has an internal state machine which tracks readout of
	 * low/high and updates the CTRL.status if
	 * - interrupt/exception taken between the two reads
	 * - high increments after low has been read
	 */
	do {
		l = read_aux_reg(AUX_RTC_LOW);
		h = read_aux_reg(AUX_RTC_HIGH);
		status = read_aux_reg(AUX_RTC_CTRL);
	} while (!(status & _BITUL(31)));	/* bit 31 set => reads were atomic */

	return (((u64)h) << 32) | l;
}
/* In-core RTC clocksource: 64-bit but per-CPU, so rated below GFRC */
static struct clocksource arc_counter_rtc = {
	.name = "ARCv2 RTC",
	.rating = 350,
	.read = arc_read_rtc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
  121. static int __init arc_cs_setup_rtc(struct device_node *node)
  122. {
  123. struct bcr_timer timer;
  124. int ret;
  125. READ_BCR(ARC_REG_TIMERS_BCR, timer);
  126. if (!timer.rtc) {
  127. pr_warn("Local-64-bit-Ctr clocksource not detected\n");
  128. return -ENXIO;
  129. }
  130. /* Local to CPU hence not usable in SMP */
  131. if (IS_ENABLED(CONFIG_SMP)) {
  132. pr_warn("Local-64-bit-Ctr not usable in SMP\n");
  133. return -EINVAL;
  134. }
  135. ret = arc_get_timer_clk(node);
  136. if (ret)
  137. return ret;
  138. write_aux_reg(AUX_RTC_CTRL, 1);
  139. return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
  140. }
  141. TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
  142. #endif
  143. /*
  144. * 32bit TIMER1 to keep counting monotonically and wraparound
  145. */
  146. static u64 arc_read_timer1(struct clocksource *cs)
  147. {
  148. return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
  149. }
/* TIMER1 clocksource: only 32-bit and per-CPU, hence the lowest rating */
static struct clocksource arc_counter_timer1 = {
	.name = "ARC Timer1",
	.rating = 300,
	.read = arc_read_timer1,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
  157. static int __init arc_cs_setup_timer1(struct device_node *node)
  158. {
  159. int ret;
  160. /* Local to CPU hence not usable in SMP */
  161. if (IS_ENABLED(CONFIG_SMP))
  162. return -EINVAL;
  163. ret = arc_get_timer_clk(node);
  164. if (ret)
  165. return ret;
  166. write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
  167. write_aux_reg(ARC_REG_TIMER1_CNT, 0);
  168. write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
  169. return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
  170. }
/********** Clock Event Device *********/

/* Per-cpu Timer0 IRQ number, resolved from DT in arc_clockevent_setup() */
static int arc_timer_irq;
/*
 * Arm the timer to interrupt after @cycles
 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	/* Program LIMIT before restarting CNT, then enable via CTRL */
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */

	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}
  183. static int arc_clkevent_set_next_event(unsigned long delta,
  184. struct clock_event_device *dev)
  185. {
  186. arc_timer_event_setup(delta);
  187. return 0;
  188. }
  189. static int arc_clkevent_set_periodic(struct clock_event_device *dev)
  190. {
  191. /*
  192. * At X Hz, 1 sec = 1000ms -> X cycles;
  193. * 10ms -> X / 100 cycles
  194. */
  195. arc_timer_event_setup(arc_timer_freq / HZ);
  196. return 0;
  197. }
/* Per-cpu Timer0 clockevent; cpumask is filled in at CPU-starting time */
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name = "ARC Timer0",
	.features = CLOCK_EVT_FEAT_ONESHOT |
		    CLOCK_EVT_FEAT_PERIODIC,
	.rating = 300,
	.set_next_event = arc_clkevent_set_next_event,
	.set_state_periodic = arc_clkevent_set_periodic,
};
/*
 * Timer0 tick handler: ACK the interrupt, re-arm if periodic, and invoke
 * the generic clockevent handler.
 */
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * 1. ACK the interrupt
	 * - For ARC700, any write to CTRL reg ACKs it, so just rewrite
	 * Count when [N]ot [H]alted bit.
	 * - For HS3x, it is a bit subtle. On taken count-down interrupt,
	 * IP bit [3] is set, which needs to be cleared for ACK'ing.
	 * The write below can only update the other two bits, hence
	 * explicitly clears IP bit
	 * 2. Re-arm interrupt if periodic by writing to IE bit [0]
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
  228. static int arc_timer_starting_cpu(unsigned int cpu)
  229. {
  230. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  231. evt->cpumask = cpumask_of(smp_processor_id());
  232. clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
  233. enable_percpu_irq(arc_timer_irq, 0);
  234. return 0;
  235. }
  236. static int arc_timer_dying_cpu(unsigned int cpu)
  237. {
  238. disable_percpu_irq(arc_timer_irq);
  239. return 0;
  240. }
  241. /*
  242. * clockevent setup for boot CPU
  243. */
  244. static int __init arc_clockevent_setup(struct device_node *node)
  245. {
  246. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  247. int ret;
  248. arc_timer_irq = irq_of_parse_and_map(node, 0);
  249. if (arc_timer_irq <= 0) {
  250. pr_err("clockevent: missing irq\n");
  251. return -EINVAL;
  252. }
  253. ret = arc_get_timer_clk(node);
  254. if (ret) {
  255. pr_err("clockevent: missing clk\n");
  256. return ret;
  257. }
  258. /* Needs apriori irq_set_percpu_devid() done in intc map function */
  259. ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
  260. "Timer0 (per-cpu-tick)", evt);
  261. if (ret) {
  262. pr_err("clockevent: unable to request irq\n");
  263. return ret;
  264. }
  265. ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
  266. "clockevents/arc/timer:starting",
  267. arc_timer_starting_cpu,
  268. arc_timer_dying_cpu);
  269. if (ret) {
  270. pr_err("Failed to setup hotplug state\n");
  271. return ret;
  272. }
  273. return 0;
  274. }
  275. static int __init arc_of_timer_init(struct device_node *np)
  276. {
  277. static int init_count = 0;
  278. int ret;
  279. if (!init_count) {
  280. init_count = 1;
  281. ret = arc_clockevent_setup(np);
  282. } else {
  283. ret = arc_cs_setup_timer1(np);
  284. }
  285. return ret;
  286. }
  287. TIMER_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);